@Preamble{
"\hyphenation{ }" #
"\ifx \undefined \circled \def \circled #1{(#1)}\fi" #
"\ifx \undefined \reg \def \reg {\circled{R}}\fi"
}
@String{ack-nhfb = "Nelson H. F. Beebe,
University of Utah,
Department of Mathematics, 110 LCB,
155 S 1400 E RM 233,
Salt Lake City, UT 84112-0090, USA,
Tel: +1 801 581 5254,
e-mail: \path|beebe@math.utah.edu|,
\path|beebe@acm.org|,
\path|beebe@computer.org| (Internet),
URL: \path|https://www.math.utah.edu/~beebe/|"}
@String{j-SIGMETRICS = "ACM SIGMETRICS Performance Evaluation Review"}
@String{pub-ACM = "ACM Press"}
@String{pub-ACM:adr = "New York, NY 10036, USA"}
@Article{Keirstead:1972:STC,
author = "Ralph E. Keirstead and Donn B. Parker",
title = "Software testing and certification",
journal = j-SIGMETRICS,
volume = "1",
number = "1",
pages = "3--8",
month = mar,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041596.1041597",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:42 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Society needs a continuous flow of upgradding products
and services which are responsive to needs, are
reliable, cost-effective and safe. When this does not
occur, excessive regulation results, stifling
technology and production. Excesses in both
directions have occurred in other fields such as
medicine, the automobile industry, petro-chemicals,
motion pictures, building construction and
pharmaceuticals. Disasters based on poor design and
implementation in information processing have occurred
in ballot-counting systems, law enforcement systems,
billing systems, credit systems and dating services.
Business has been undersold and oversold and sometimes
reached the brink of ruin in its increasing reliance on
computer systems. The only answer is a balanced degree
of self-regulation. Such self-regulation for software
systems is presented here.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bell:1972:CME,
author = "Thomas E. Bell",
title = "Computer measurement and evaluation: artistry, or
science?",
journal = j-SIGMETRICS,
volume = "1",
number = "2",
pages = "4--10",
month = jun,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113640.1113641",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Effort invested in computer measurement and evaluation
is clearly increasing, but the results of this
increasing investment may be unfortunate. The
undeniable value of the results and the enthusiasm of
participants may be leading to unrealizable
expectations. The present artistry needs to be
converted into a science for achieving a solid future;
the most fruitful direction may be the synthesis of
individual, empirical discoveries combined with testing
hypotheses about performance relationships.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Palme:1972:BGM,
author = "Jacob Palme",
title = "Beware of the {Gibson} mix",
journal = j-SIGMETRICS,
volume = "1",
number = "2",
pages = "10--11",
month = jun,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113640.1113642",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Evaluation of computer systems is sometimes made using
a so-called Gibson mix. This is a list of common
machine instructions with weights depending on how
often they are supposed to occur in typical programs.
By using these weights to estimate the mean instruction
execution time, the `speed' of a computer system is
supposed to be measured.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Johnson:1972:SST,
author = "Robert R. Johnson",
title = "Some steps toward an information system performance
theory",
journal = j-SIGMETRICS,
volume = "1",
number = "3",
pages = "4--15",
month = sep,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041599.1041600",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A means for representing information handling systems
at the problem, program, and computer level is
presented. This means, Petri Nets, coupled with
classical information theory, provides quantitative
measures of system capacity and thruput as well as
measures of `the work done.' Concepts of
information-capacity and of information-work are
derived from these probabilistically labeled Petri Nets
based on analogies to thermodynamics. Thruput is
measured as information-gain. Comments are made about
the possible significance of these concepts, their
relationship to classical thermodynamics, and the
directions of continuing thought stimulated by these
concepts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kernighan:1972:CAO,
author = "B. W. Kernighan and P. J. Plauger and D. J. Plauger",
title = "On comparing apples and oranges, or, my machine is
better than your machine",
journal = j-SIGMETRICS,
volume = "1",
number = "3",
pages = "16--20",
month = sep,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041599.1041601",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a recent comparison test, six computer
manufacturers were asked to code a particular program
loop to run as quickly as possible on their machine.
Presumably conclusions about the merits of the machines
were to be drawn from the resulting code. We have
reduced the number of instructions for the loop by an
average of one instruction per machine, a 15\%
decrease. It appears that conclusions might more
appropriately be drawn about manufacturers' software.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lynch:1972:DDA,
author = "W. C. Lynch",
title = "Do disk arms move?",
journal = j-SIGMETRICS,
volume = "1",
number = "4",
pages = "3--16",
month = dec,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041603.1041604",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:54 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurement of the lengths of disk arm movements in a
2314 disk storage facility of an IBM 360/67 operating
under the Michigan Terminal System yielded the
unexpected data that the arms need not move in 63\% of
the accesses and need move for an average of only 30 ms
in the remaining 37\% of the cases. A description and
analysis of a possible mechanism of action is
presented. The predictions of this model do not
disagree with the measured data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Halstead:1973:LLM,
author = "M. H. Halstead",
title = "Language level, a missing concept in information
theory",
journal = j-SIGMETRICS,
volume = "2",
number = "1",
pages = "7--9",
month = mar,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041606.1041607",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "According to Information Theory, [Cf Leon Brillouin,
Science and Information Theory, Academic Press, N. Y.
1956, pp. 292-3], the information content of a table of
numbers does not depend upon how difficult it was to
obtain the entries in the table, but only upon whether
or not we know how, or how precisely we know how, to
reconstruct the entire table or any parts of it.
Consequently, from present Information Theory, since we
`know in advance' how a table of sines is constructed,
such a table contains absolutely no information. For a
person who does not `know in advance' how to construct
a table of sines, however, the table would indeed
contain `Information.' This ambiguity apparently
contradicts the basic statement [Leon Brillouin, op.
cit., page 10] that `Information is an absolute
quantity which has the same numerical value for any
observer,' a contradiction which remains even when we
accept Brillouin's next statement that `The human value
of the information, on the other hand, would
necessarily be a relative quantity, and would have
different values for different observers, according to
the possibility of their understanding it and using it
later.'",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Halstead:1973:EDP,
author = "M. H. Halstead",
title = "An experimental determination of the `purity' of a
trivial algorithm",
journal = j-SIGMETRICS,
volume = "2",
number = "1",
pages = "10--15",
month = mar,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041606.1041608",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent work in an area which might be designated as
Software Physics [1,2,3,4,5,6] has suggested that the
basic structure of algorithms may offer an interesting
field for experimental research. Such an experiment is
reported here. In an earlier paper [2], it was
suggested that a `Second Law' might be stated as: `The
internal quality, LV, of a pure algorithm is
independent of the language in which it is
expressed.'",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denning:1973:RSC,
author = "Peter J. Denning",
title = "Review of {`Statistical Computer Performance
Evaluation' by Walter Freiberger; Academic Press
(1972)}",
journal = j-SIGMETRICS,
volume = "2",
number = "1",
pages = "16--22",
month = mar,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041606.1041611",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is the proceedings of a conference held at
Brown University on November 22-23, 1971. The editors
state that only papers dealing with real data in a
reasonably sophisticated manner were accepted for the
conference. Papers dealing simply with the collection
of data, or with queueing-theoretic models, were
excluded. The papers are grouped into seven sections
corresponding to the seven sessions at the conference;
at the end of each section is a brief statement by the
one or two discussants of that session's papers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Svobodova:1973:CSN,
author = "Liba Svobodova",
title = "Communications: Some notes on the {Computer Synectics}
hardware monitor sum",
journal = j-SIGMETRICS,
volume = "2",
number = "1",
pages = "23--25",
month = mar,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041606.1041609",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The longer I have been working with the hardware
monitor SUM, a device designed and manufactured by
Computer Synectics, the less pleased I have been.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ishida:1973:JSU,
author = "Haruhisa Ishida and Nobumasa Takahashi",
title = "Job statistics at a 2000-user university computer
center",
journal = j-SIGMETRICS,
volume = "2",
number = "2",
pages = "2--13",
month = jun,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113644.1113645",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Computer Centre at the University of Tokyo is one
of 7 large university centers serving researchers
throughout Japan; it processes 120,000 jobs annually
submitted by 2,000 academic users in various research
institutions. A brief comparison of the 7 centers and
the breakdown of users are shown. To clarify the job
characteristics of these users, account data of all
jobs in an entire year were analyzed and the results
are presented. They are shown in terms of the
distribution of CPU time, numbers of input cards/output
pages/output cards, program size, job end conditions
and turnaround time etc. A special on-line card punch
is mentioned which punches holes in the 13th row to
separate output card decks. It was found that, when the
CPU speed was increased 8 times after replacement under
the same operating system, the average job size was
increased 4 times. Hence only twice as many jobs could
be processed. The results of analysis have been used
for systems performance evaluation (for example, the
CPU busy-rate was found to be 69\%), improvement and
for an input job model used in planning for the next
system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rice:1973:AMC,
author = "Don R. Rice",
title = "An analytical model for computer system performance
evaluation",
journal = j-SIGMETRICS,
volume = "2",
number = "2",
pages = "14--30",
month = jun,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113644.1113646",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an analytical model of a computer
system useful in the evaluation of system performance.
The model is described in detail while the mathematics
are minimized. Emphasis is placed on the utility of the
model rather than the underlying theory and a number of
illustrative examples are included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kolence:1973:SE,
author = "Kenneth W. Kolence",
title = "The software empiricist",
journal = j-SIGMETRICS,
volume = "2",
number = "2",
pages = "31--36",
month = jun,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113644.1113647",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The advent of software and hardware monitoring
technology has presented us with a flood of data,
without bringing commensurate understanding by which to
interpret it. Thus, the most important problem before
us in the field of computer measurement is to discover
the relationships between the variables we measure and
the overall system properties of interest.
Particularly, we wish to be able to predict system
behavior and performance from a knowledge of the values
of factors under our control. In this way, not only
will we understand the meanings of these variables, but
we shall learn how to design our systems to perform as
we wish them to. The latter is a prime goal of software
engineering, the former the rationale of what has been
called software physics. In this section of the Review
we are and shall be interested in the empirical
development of such an understanding, and the
experimental aspects of computer measurement. Our
intent is to assist in the building of a solid body of
knowledge by providing a publication vehicle for
empirical and experimental data. That is, we have
little interest in publishing theory, which can
normally be done elsewhere. Our goal is to publish
experimental data to support or refute theory, and
empirical data from which theory builders may take
their inspiration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kolence:1973:SUP,
author = "Kenneth W. Kolence and Philip J. Kiviat",
title = "Software unit profiles \& {Kiviat} figures",
journal = j-SIGMETRICS,
volume = "2",
number = "3",
pages = "2--12",
month = sep,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041613.1041614",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the June, 1973 issue of the {\em Performance
Evaluation Review}, the concept of using circular
graphs (called Kiviat graphs by Kolence) to present
system performance data was introduced in the column
{\em The Software Empiricist}. In this article we wish
to report on some recent work in using such graphs to
present system and program profiles in a strikingly
visual way of potential use to all practitioners of
computer measurement. In discussing this data, we find
it necessary to comment on the meaning of the variables
used for such profiles in a way which also should be of
interest to practitioners.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denning:1973:WOA,
author = "Peter J. Denning",
title = "Why our approach to performance evaluation is
{SDRAWKCAB}",
journal = j-SIGMETRICS,
volume = "2",
number = "3",
pages = "13--16",
month = sep,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041613.1041615",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "What does SDRAWKCAB mean? Some of you already know;
some I have told; some have guessed. But many do not
know. Those who do know, know it would be contrary to
the theme of SDRAWKCAB to tell you immediately what it
means, although it certainly would make things much
easier if I told you now.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Beck:1973:CSL,
author = "Norman Beck and Gordon Ashby",
title = "On cost of static linking and loading of subprograms",
journal = j-SIGMETRICS,
volume = "2",
number = "3",
pages = "17--20",
month = sep,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041613.1041616",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to report some data
concerning cost in CPU processing due to loading
programs. The data was collected on a PDP-10, using
modifications made by the linking loader to the
prologue generated for FORTRAN compiled programs, by
the addition of one UUO (a programmed operation similar
to an SVC on IBM 360/370), and several cells in the
monitor used as counters. The data covers the number of
programs loaded and the CPU ms expended loading them.
This data is broken down between programs that were
loaded and never entered and programs loaded and
eventually executed. It is further classified according
to periods of heavy use for program development and
periods of heavy production use.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kolence:1973:SEE,
author = "Ken Kolence",
title = "The software empiricist experimental disciplines \&
computer measurements",
journal = j-SIGMETRICS,
volume = "2",
number = "3",
pages = "21--23",
month = sep,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041613.1041617",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The introduction and use of the capability for
quantitative measurements into the field of computer
science must inexorably lead to the development and use
of experimental approaches and techniques to discover,
understand, and verify relationships between the
observables of what is today loosely called computer
performance. The reason for this column appearing as a
regular feature in PER is to assist in the process of
bridging the gap in both directions between the
practitioners and theorists of the field. In the first
column in this series, we introduced the concepts of
empiricism and the initial discoveries of invariances
of values as foundations of this new aspect of computer
science. With this issue, we shall begin to investigate
the requirements and methodologies by which this
approach can be applied to the common benefit of both
the practical and theoretical orientations. When a
particular topic can be demonstrated with actual data
or equivalent means, it will be the topic of a separate
article.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hughes:1973:UHM,
author = "James Hughes and David Cronshaw",
title = "On using a hardware monitor as an intelligent
peripheral",
journal = j-SIGMETRICS,
volume = "2",
number = "4",
pages = "3--19",
month = dec,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113650.1113651",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:20 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Conventionally, hardware monitoring has been performed
using manually controlled off-line devices. It is
suggested that a hardware monitor incorporating program
control and acting as an intelligent peripheral device
would realize greater utility and wider application.
The development and application of such a device is
described; a combination of the merits of both software
and hardware monitoring techniques is claimed for it.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Svobodova:1973:MCS,
author = "Liba Svobodova",
title = "Measuring computer system utilization with a hardware
and a hybrid monitor",
journal = j-SIGMETRICS,
volume = "2",
number = "4",
pages = "20--34",
month = dec,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113650.1113652",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:20 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer system utilization is generally measured in
terms of the utilization of individual system
components and the overlap of activities of two or more
system components. This type of data can be used to
construct a system performance profile [BONN 69, COCI
71, SUM 70]. Utilization of a system component is
obtained as the ratio (unit busy time)/(total elapsed
time). If a particular unit performs more than one type
of operation, the unit busy time may be further divided
into portions corresponding to different activities and
an activity profile can be constructed for each such
unit. For a storage unit, information about utilization
of different portions of storage might be desirable in
addition to utilization of this unit as a whole. A
space utilization profile can be developed in this
case. To cover both cases, the term unit utilization
profile is used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wortman:1974:NHR,
author = "David B. Wortman",
title = "A note on high resolution timing",
journal = j-SIGMETRICS,
volume = "3",
number = "1",
pages = "3--9",
month = mar,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041619.1041620",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ability to accurately time the execution of
sequences of machine instructions is an important tool
in the tuning and evaluation of computer hardware and
software. The complexity of modern hardware and
software systems often makes accurate timing
information difficult to obtain [1]. This note
describes an experimental comparison of timing
information provided by a large multiprogramming
operating system (OS/360 MVT) with timing information
derived directly from a high resolution hardware clock.
The hardware clock was found to be a superior source of
timing information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Snyder:1974:QSA,
author = "Rowan Snyder",
title = "A quantitative study of the addition of extended core
storage",
journal = j-SIGMETRICS,
volume = "3",
number = "1",
pages = "10--33",
month = mar,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041619.1041621",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In evaluating computer systems it is necessary to
identify the prime determinants of system performance,
and to quantify a performance metric. The purpose of
this paper is to present a quantitative study of the
effects of a significant hardware reconfiguration on
some measures of system performance, and thereby
demonstrate the effectiveness of Kiviat graphs in
performance analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merrill:1974:TCA,
author = "H. E. Barry Merrill",
title = "A technique for comparative analysis of {Kiviat}
graphs",
journal = j-SIGMETRICS,
volume = "3",
number = "1",
pages = "34--39",
month = mar,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041619.1041622",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The article in September, 1973 Performance Evaluation
Review demonstrated again the utility of the Kiviat
Graph as a visual display of system profiles. A simple
extension of the concept of the Kiviat Graph permits a
realistic (though not necessarily linear) comparison of
two Kiviat graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peterson:1974:CSH,
author = "Thomas G. Peterson",
title = "A comparison of software and hardware monitors",
journal = j-SIGMETRICS,
volume = "3",
number = "2",
pages = "2--5",
month = jun,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041687.1041688",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Tests were performed to compare the accuracy of two
computer system monitors. Specifically, results from a
hardware monitor were compared with results from a
software monitor. Some of the subreports produced by
the software monitor were quite accurate; other
subreports were not quite so accurate, but they were
consistent from run to run. In view of these test
results, it appears that the software monitor can be
used to measure the effects of changes made in a system
tuning project.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Syms:1974:BCT,
author = "Gordon H. Syms",
title = "Benchmarked comparison of terminal support systems for
{IBM 360} computers",
journal = j-SIGMETRICS,
volume = "3",
number = "2",
pages = "6--34",
month = jun,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041687.1041689",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A set of terminal scripts and benchmarks were derived
for comparing the performance of time sharing and batch
computer operating systems. Some of the problems
encountered in designing valid benchmarks for comparing
computer operating systems under both terminal and
batch loads are discussed. The results of comparing
TSS/360, CP/67 and MTS time sharing systems for the IBM
360/67 over a wide range of load conditions are
presented. The results of comparing TSS, MTS and OS/MVT
under batch loads are also presented. The tests were
conducted with Simplex and Dual processor
configurations with 256K bytes to 768K bytes of main
memory. The conclusions were quite surprising in that
CP/67 running on a minimal system performed
competitively with TSS/360 on a much larger dual
processor system. With equal configurations CP/67 out
performed TSS/360 by a wide margin. Furthermore, MTS
providing both batch and terminal support produced
performance that was 5 percent to 25 percent better
than the split configuration with CP/67 providing the
terminal support and OS/MVT providing the batch
processing support. Serious performance degradation of
the time sharing computer systems from overloading was
experienced and a simple solution is suggested to
prevent such degradation. The degradation was so severe
as to render the performance less than that of a
sequential job processor system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Morris:1974:KGC,
author = "Michael F. Morris",
title = "{Kiviat} graphs: conventions and `figures of merit'",
journal = j-SIGMETRICS,
volume = "3",
number = "3",
pages = "2--8",
month = oct,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041691.1041692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Once in a very great while an idea comes along that
quickly captures many imaginations. The circular
graphic technique proposed nearly two years ago by Phil
Kiviat, our illustrious Chairman, and very
appropriately named `Kiviat Graphs' by our erstwhile
(and sorely missed) `Software Empiricist,' Ken Kolence,
is one of these ideas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lujanac:1974:NSB,
author = "Paul L. Lujanac",
title = "A note on {Syms}' benchmarked comparison",
journal = j-SIGMETRICS,
volume = "3",
number = "3",
pages = "9--10",
month = oct,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041691.1041693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "If the load factor is expressed linearly as a fraction
of the capacity of a terminal-oriented system, we
assume that response times increase more or less
exponentially with an increase in load factor. Syms'
load factor is nonlinear, and, in fact, was designed to
`make the terminal response times approximately a
linear function of the load factors.'",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Graham:1974:MPB,
author = "G. Scott Graham and Peter J. Denning",
title = "Multiprogramming and program behavior",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "1--8",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809367",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic multiprogramming memory management strategies
are classified and compared using extant test data.
Conclusions about program behavior are then drawn.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandwain:1974:MPV,
author = "A. Brandwain and J. Buzen and E. Gelenbe and D.
Potier",
title = "A model of performance for virtual memory systems",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "9--9",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809368",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network models are well suited for analyzing
certain resource allocation problems associated with
operating system design. An example of such a problem
is the selection of the level of multiprogramming in
virtual memory systems. If the number of programs
actively competing for main memory is allowed to reach
too high a value, thrashing will occur and performance
will be seriously degraded. On the other hand,
performance may also suffer if the level of
multiprogramming drops too low since system resources
can become seriously underutilized in this case. Thus
it is important for virtual memory systems to maintain
optimal or near optimal levels of multiprogramming at
all times. This paper presents an analytic model of
computer system behavior which can be used to study
multiprogramming optimization in virtual memory
systems. The model, which explicitly represents the
numerous interactions which occur as the level of
multiprogramming varies, is used to numerically
generate performance curves for representative sets of
parameters. A simplified model consisting of a CPU and
a single backing store device is then used to derive an
approximate expression for the optimal level of
multiprogramming. The simplified model is also used to
examine the transient behavior of such systems. The
mathematical model we present is based on some
simplifying assumptions; in particular all programs
executing in the system are supposed to be
statistically identical. In this respect the model we
present must be considered to be a theoretical
explanation of a phenomenon (thrashing) observed in
certain operating systems rather than an exact
representation of reality. Certain assumptions of the
mathematical model are relaxed in a simulation model
where distribution functions of service times at the
secondary memory and input-output devices are
arbitrary; by comparison with the theoretical results
we see that CPU utilization and throughput are not very
sensitive to the specific forms of these distributions
and that the usual exponential assumptions yield quite
satisfactory results. The simulation model is also
programmed to contain overhead. Again we observe that
the mathematical model's predictions are in fair
agreement with the useful CPU utilization predicted by
the simulation experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
xxnote = "Check: author may be Brandwajn??",
}
@Article{Henderson:1974:OCW,
author = "Greg Henderson and Juan Rodriguez-Rosell",
title = "The optimal choice of window sizes for working set
dispatching",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "10--33",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809369",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of varying window size in a working set
dispatcher to control working set size and number of
page faults is examined. A space-time cost equation is
developed and used to compare fixed window size to
variable window size for different types of secondary
storage based on the simulated execution of real
programs. A general approach is indicated for studying
the relative merit of the two dispatching algorithms
and their interaction with different hardware
configurations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Dispatching; Optimal control; Resource allocation;
Supervisory systems; Time-sharing systems; Working
set",
}
@Article{Denning:1974:CLP,
author = "Peter J. Denning",
title = "Comments on a linear paging model",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "34--48",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809370",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The linear approximation relating mean time between
page transfers between levels of memory, as reported by
Saltzer for Multics, is examined. It is tentatively
concluded that this approximation is untenable for main
memory, especially under working set policies; and that
the linearity of the data for the drum reflects the
behavior of the Multics scheduler for background jobs,
not the behavior of programs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brice:1974:FCR,
author = "Richard S. Brice and J. C. Browne",
title = "Feedback coupled resource allocation policies in the
multiprogramming-multiprocessor computer system",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "49--53",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809371",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents model studies of some integrated
feedback-driven scheduling systems for a
multiprogrammed computer system. This abstract can
present only the conclusions of the studies and little
of the supporting data and detail. The basic format of
the analysis is to fix a size for the local buffers and
a total size for the collection buffers, to define a
set of algorithms for the determination of the data
removal quanta to the local buffers, the allocation of
space in the collection buffers, and the look-ahead
mechanism for input and then to evaluate the relative
merits of the various strategies by the resulting CPU
efficiency. Three feedback algorithms are studied as
examples in this work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Halachmi:1974:CCT,
author = "Baruch Halachmi and W. R. Franta",
title = "A closed, cyclic, two-stage multiprogrammed system
model and its diffusion approximation solution",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "54--64",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809372",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper attention is focused on closed
multiprogrammed computer type systems. In particular,
two-stage closed queueing systems are considered. The
first stage can be associated with the CPU (Central
Processing Unit) and the other with the I/O
(Input-Output) operations for all the models
discussed. For the first model we consider the
{GI1/MS/N} system, which allows the service times of a
single CPU to obey any general probability
distribution, with finite variance, while the I/O
servers are taken to be exponential. The second model
is an extension of the first where the concept of
feedback is implemented in the CPU stage. This concept
plays an important role in computer environments where
the operating system includes the multiplexing or page
on demand property. The third model, the {MS1/MS2/N},
deals with multiprocessing computer systems where
possibly more than one CPU is available, but all
servers are assumed to be exponential. In the spirit of
the approximation to the GI/G/S open system, as a final
model, we construct the approximate solution to the
{GIS1/GIS2/N} closed system and discuss the
circumstances under which its use is advisable. Several
numerical examples for each of the models are given,
each accompanied by appropriate simulation results for
comparison. It is on the basis of these comparisons
that the quality of the suggested diffusion
approximations can be judged. The diffusion
approximating formulas should be regarded not only as a
numerical technique, but also as a simplifying
approach, by which deeper insight can be gained into
complicated queueing systems. Considerable work remains
to be done, using as a methodology the approach, given
here, and several possible extensions are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schwetman:1974:ATS,
author = "H. D. Schwetman",
title = "Analysis of a time-sharing subsystem (a preliminary
report)",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "65--75",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809373",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The MESA subsystem provides a wide variety of services
to remotely located users of the computing facilities
of the Purdue University Computing Center. This paper
presents the preliminary steps of an in-depth study
into the behavior of MESA. The study uses a software
data-gathering facility to analyze the usage and
queueing aspects of this behavior and to provide values
for parameters used by two models of the subsystem.
These models, a network-of-queues model and a
simulation model, are designed to project subsystem
behavior in different operating environments. The paper
includes a number of tables and figures which highlight
the results, so far, of the study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reiser:1974:ASC,
author = "M. Reiser and A. G. Konheim",
title = "The analysis of storage constraints by a queueing
network model with blocking",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "76--81",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809374",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The finite capacity of storage has a significant
effect on the performance of a contemporary computer
system. Yet it is difficult to formulate this problem
and analyze it by existing queueing network models. We
present an analysis of an open queueing model with two
servers in series in which the second server has finite
storage capacity. This network is an exponential
service system; the arrival of requests into the system
is modeled by a Poisson process (of rate $ \lambda $)
and service times in each stage are exponentially
distributed (with rates $ \alpha $ and $ \beta $
respectively). Requests are served in each stage
according to the order of their arrival. The principal
characteristic of the service in this network is
blocking; when $M$ requests are queued or in service in
the second stage, the server in the first stage is
blocked and ceases to offer service. Service resumes in
the first stage when the queue length in the second
stage falls to $ M - 1$. Neuts [1] has studied
two-stage blocking networks (without feedback) under
more general statistical hypotheses than ours. Our goal
is to provide an algorithmic solution which may be more
accessible to engineers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schatzoff:1974:SVT,
author = "M. Schatzoff and C. C. Tillman",
title = "Statistical validation of a trace-driven simulator",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "82--93",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809375",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A common problem encountered in computer system
simulation is that of validating that the simulator can
produce, with a reasonable degree of accuracy, the same
information that can be obtained from the modelled
system. This is basically a statistical problem because
there are usually limitations with respect to the
number of controlled tests which can be carried out,
and assessment of the fidelity of the model is a
function of the signal to noise ratio. That is, the
magnitude of error which can be tolerated depends upon
the size of the effect to be predicted. In this paper,
we describe by example how techniques of statistical
design and analysis of experiments have been used to
validate the modeling of the dispatching algorithm of a
time sharing system. The examples are based on a
detailed, trace-driven simulator of CP-67. They show
that identical factorial experiments involving
parameters of this algorithm, when carried out on both
the simulator and on the actual system, produced
statistically comparable effects.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferrari:1974:GPS,
author = "Domenico Ferrari and Mark Liu",
title = "A general-purpose software measurement tool",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "94--105",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809376",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A software measurement tool designed for the users of
PRIME, an interactive system being developed, is
presented. The tool, called SMT, allows its user to
instrument a program, modify a pre-existing
instrumentation and specify how the collected data are
to be reduced by typing in a few simple commands. The
user can also write his own measurement routines, which
specify the actions to be taken at event detection
time, and submit them to the SMT; after checking their
correctness, the SMT deals with them as with its
built-in, standard measurement routines. The design
goals of a general-purpose tool like the SMT are
discussed, and the prototype version of the tool, which
has been implemented, is described from the two
distinct viewpoints of a user and of a measurement-tool
designer. An example of the application of the
prototype to a measurement problem is illustrated, the
reasons why not all of the design goals have been
achieved in the implementation of the prototype are
reviewed, and some of the foreseeable extensions of the
SMT are described.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Foley:1974:EDD,
author = "James D. Foley and John W. McInroy",
title = "An event-driven data collection and analysis facility
for a two-computer network",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "106--120",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809377",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe an event-driven data
collection facility, and a general-purpose program to
perform a set of analyses on the collected data. There
are several features which distinguish this facility
from others. First, the system being monitored is a
network of loosely-coupled computers. Although there
are just two computers in the network, the facility
could be readily extended to larger networks. Second,
the main purpose of the facility is to monitor the
execution of interactive graphics application programs
whose processing and data are distributed between the
network's computers. Third, the data collector and
analyzer are readily extendible to treat new kinds of
data. This is accomplished by a data and event
independent collector, and a table-driven data
analyzer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Batson:1974:MVM,
author = "A. P. Batson and R. E. Brundage",
title = "Measurements of the virtual memory demands of
{Algol-60} programs (Extended Abstract)",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "121--126",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809378",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Programming languages such as Algol-60 use block
structure to express the way in which the name space of
the current environment, in the contour model (1) sense
of that word, changes during program execution. This
dynamically-varying name space corresponds to the
virtual memory required by the process during its
execution on a computer system. The research to be
presented is an empirical study of the nature of the
memory demands made by a collection of Algol-60
programs during execution. The essential
characteristics of any such resource request are the
amount of memory requested, and the holding time for
which the resource is retained, and these distributions
will be presented for several components of the virtual
memory required by the Algol programs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sebastian:1974:HHE,
author = "Peter R. Sebastian",
title = "{HEMI} ({Hybrid Events Monitoring Instrument})",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "127--139",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809379",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "HEMI is an experimental instrumentation system being
developed for use with the CYBER 70 and 170 Series
computers in order to ascertain the extent to which an
integrated approach to instrumentation is economically
and technologically viable for performance measurement
and evaluation purposes. HEMI takes advantage of the
distributed CYBER computer architecture. This consists
of a pool of Peripheral Processors (PPs) --- (mainly
dedicated to I/O and system tasks) while the CPU
capabilities are reserved mostly for computation;
Central Memory constitutes the communications link.
HEMI uses one of the PPs as its major processor. A
hardware data acquisition front end is interfaced to
one of the I/O channels and driven by the PP. Hardware
probes sample events at suitable testpoints, while the
PP has software access to Central Memory (Operating
System tables and parameters), Status Registers, I/O
Channel Flags, etc. A data reduction package is used to
produce a variety of reports from the data collected. A
limited on-line data reduction and display capability
is also provided. This paper will describe the current
status of the project as well as anticipated
applications of HEMI.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cox:1974:IAC,
author = "Springer W. Cox",
title = "Interpretive analysis of computer system performance",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "140--155",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809380",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A typical performance evaluation consists of the
identification of resources, the definition of system
boundaries, the measurement of external and internal
performance variables, and finally the interpretation
of data and projection of system performance to
hypothetical environments. These projections may be
used to estimate the cost savings to be expected when
changes are made to the system. The fundamental
external performance measures such as response time and
thruput are intimately related, but may be defined
differently depending on how the system is defined.
They can be analyzed with respect to the internal
performance measures (such as activities, queue lengths
and busy times) by applying one or more interpretations
such as: absolute utilizations, normalized busy times,
system profiles, analysis of response, workload
relaxation, and resource consumption hyperplanes. These
models, which are generally free of assumptions
regarding interarrival and service time distributions,
can be adjusted to represent potential changes to the
system. Then the interpretations may be used to
evaluate the predicted external performance measures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Noe:1974:DYC,
author = "J. D. Noe and N. W. Runstein",
title = "Develop your computer performance pattern",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "156--165",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809381",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Is the load on your computer shifting? Did that change
to faster access disks really help? Would more core
memory increase throughput appreciably, or would it be
necessary to also increase central processor power?
These are three quite different kinds of questions; one
concerns detecting a long-term trend, another assessing
the effects of a system change, and a third estimating
effects of the decision to alter the configuration. Yet
all of these require knowledge of current and past
system performance, the type of knowledge that must be
the result of long-term performance monitoring. This is
not simple enough to be picked up overnight or in one
series of experiments, nor can it be assessed by
watching one or two parameters over a long period. One
must have a thorough understanding of the pattern of
performance by knowing the mean values of a number of
measures and knowing something about the variations
from these means. This paper hardly needs to recommend
that computer managers establish an understanding of
performance pattern; they already are very conscious of
the need. What it does is recount development of a
method of doing so for the CDC 6400 at the University
of Washington and of the selection of ``Kiviat Graphs''
as a means to present data in a synoptic form. The
remainder of this paper will give a brief account of
the authors' experience in designing a measurement
system for the CDC 6400 at the University of Washington
Computer Center. This will include comments on the
approach to deciding what to measure and display for
the synoptic view of the system, as well as how to
provide more detailed data for backup. Examples of the
use of Kiviat Graphs [4] to show the effects of load
shift and of a system configuration change are
included, and the effect of a change of operating
system will be noted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brotherton:1974:CCC,
author = "D. E. Brotherton",
title = "The computer capacity curve --- a prerequisite for
computer performance evaluation and improvement",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "166--179",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809382",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurements of themselves have tended to concentrate
on specific computer configuration components (e.g.,
CPU load, channel load, disk data set contention,
problem program optimization, operating system
optimization, etc.) rather than at the total computer
configuration level. As a consequence, since these
components can have a high degree of interaction, the
requirement currently exists for a workable
configuration performance concept which will reflect
the configuration performance change that is the
resultant of single or multiple component change. It is
the author's opinion that such a concept will provide
management and measurement specialists a planning and
analysis tool that can be well used in evaluating the
costs. It is to this configuration performance concept
that this paper is addressed, and the concept of my
choosing is named ``The Computer Capacity Curve.''",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Erikson:1974:VCU,
author = "Warren J. Erikson",
title = "The value of {CPU} utilization as a criterion for
computer system usage",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "180--187",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is generally agreed that a computer system's CPU
utilization means little by itself, but there has been
only a limited amount of research to determine the
value of CPU utilization when used with other
performance measures. This paper focuses on
time-sharing systems (or similar systems such as some
remote batch systems) as viewed by someone who wants to
minimize the mean cost per job run on the system. The
paper considers cost per job to include both the
computer cost (as allocated among all the jobs run on
the system) and the user cost (where user cost is the
time spent waiting for a response from the system
multiplied by the user's wage rate). Given this
approach, cost per job is a function of some constants
(user wage rate, computer system cost, and mean
processing time per job) and only one variable (CPU
utilization). The model thus developed can be used to
determine the optimum CPU utilization for any system.
It can also be used to determine the value of different
tuning efforts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Badel:1974:AOP,
author = "M. Badel and E. Gelenbe and J. Leroudier and D. Potier
and J. Lenfant",
title = "Adaptive optimization of the performance of a virtual
memory computer",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "188--188",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809384",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is known that the regulation of the degree of
multiprogramming is perhaps one of the most important
factors determining the overall performance of a
virtual memory computer. In this paper we present an
approach which differs somewhat from the approaches
usually taken to regulate the degree of
multiprogramming, which are mainly derived from the
working-set principles. We design a controller which
will regulate the system in order to optimize a given
performance measure. The controller is applied to a
system where the critical resource is primary memory,
and we are only concerned with systems where
ineffective regulation leads to the phenomenon known as
thrashing due to extensive paging activity. In the
first section, the dynamics of the system we wish to
regulate are investigated using an analytical model.
The system consists of a set of terminals and of a
resource loop (CPU, secondary memory device, file disk)
shared by the users. Using classical assumptions about
program behavior (e.g., life-time function), the
throughput of the RL is obtained as a function of the
degree of multiprogramming $n$ (number of users sharing
the resources at a given instant of time) and of the
system parameters. This result provides a greater
insight into the ``plant'' we wish to control. The
mathematical results are validated and extended with
data from simulation experiments using a more detailed
model (overheads and non-exponential assumption). In
the next section, a criterion called ``dilatation''
based on the utilization of the different resources is
defined. From the analytical and simulation results of
the first section, it can be shown that there exists a
value $n_0$ of the degree of multiprogramming which
maximizes this criterion. The regulation of $n$ to $n_0$
is achieved by controlling the access of the users to
the RL. The value of $n_0$ is estimated in real-time
through a continuous estimation of the first two
moments of the criterion. Using these estimations, the
decision whether or not to introduce a new user into the RL is
taken whenever a user leaves a terminal or departs from
the RL. Extensive simulation experiments were
conducted, where the implementation of the different
functions of the controller have been thoroughly
simulated. They have shown that the control scheme
leads to an improvement of the system performance in
mean response time and resource utilization, and,
overall, adapts in real-time the degree of
multiprogramming to the characteristics of the users
(the adaptation is performed in 4 sec. or so for a unit
variation of the optimal degree of multiprogramming). A
discussion of practical application of results ends the
paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kimbleton:1974:BCS,
author = "Stephen R. Kimbleton",
title = "Batch computer scheduling: a heuristically motivated
approach",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "189--198",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809385",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Efficient scheduling of jobs for computer systems is a
problem of continuing concern. The applicability of
scheduling methodology described in the operations
research literature is severely restricted by the
dimensionality of job characteristics, the number of
distinct resource types comprising a computer system,
the non-deterministic nature of the system due to both
interprocess interaction and contention, and the
existence of a multitude of constraints affecting job
initiation times, job completion times, and job
interactions. In view of the large number of issues
which must be considered in job scheduling, a heuristic
approach seems appropriate. This paper describes an
initial implementation of such an approach based upon a
fast, analytically driven, performance prediction
tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharp:1974:APD,
author = "Joseph C. Sharp and James N. Roberts",
title = "An adaptive policy driven scheduler",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "199--208",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809386",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The theory of policy driven schedulers (Ref. [1]) is
extended to cover cases in which the scheduling
parameters are allowed to adapt dynamically as the
system's job load varies. The system under
consideration offers batch, time sharing and limited
real time services. Data from simulated and live loads
are presented to evaluate both the static and the
adaptive schedulers. A policy driven scheduler makes
its decisions with respect to a set of policy
functions, $f_i(t)$. Each of the policy functions
corresponds to a different type of user and specifies
the amount of computing resources that the system will
try to give a user in that group within a given total
amount of elapsed time. It is found that the policy
functions must be set conservatively in order to avoid
response problems during periods of heavy load, but
that during more lightly loaded periods the
conservative settings result in widely disparate rates
of service to similar jobs. One solution is to vary the
policy functions as the job load changes. A dynamic
algorithm is presented that maintains responsiveness
during heavy loads and provides fairly uniform service
rates at other times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merrill:1975:FCC,
author = "H. W. Barry Merrill",
title = "Further comments on comparative evaluation of {Kiviat}
graphs",
journal = j-SIGMETRICS,
volume = "4",
number = "1",
pages = "1--10",
month = jan,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041695.1041696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mike Morris has presented an excellent discussion in
these pages (1) of the use of Kiviat Graphs for
Computer Performance Evaluation, referencing another
fine article (2) which proposed a technique for
analytic comparisons (rankings) of these Graphs. Morris
also proposes that these techniques may be very useful
in describing system performance, and suggests a
different method for calculation of `Figures of Merit'
of Kiviat Graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stevens:1975:NFM,
author = "Barry A. Stevens",
title = "A note on figure of merit",
journal = j-SIGMETRICS,
volume = "4",
number = "1",
pages = "11--19",
month = jan,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041695.1041697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Since Merrill proposed a Figure of Merit (FOM) for use
in interpretation of the Kiviat Graph (KG), the FOM has
found its way into at least one computer program to
plot those graphs, and has been the subject of further
discussion and amplification and has had alternate
computation methods proposed and rebutted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bell:1975:MCP,
author = "Thomas E. Bell",
title = "Managing computer performance with control limits",
journal = j-SIGMETRICS,
volume = "4",
number = "1",
pages = "21--28",
month = jan,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041695.1041698",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dr. Bell received his doctorate in Operations
Management from the University of California at Los
Angeles in 1968. He immediately joined the Rand
Corporation as a Member of the Technical Staff in its
Computer Science Department and undertook research in
the simulation and performance improvement of computing
systems. During this research he participated in the
definition of the Extendable Computer System Simulator,
the development of a methodology for computer
performance improvement, and analysis of large,
multi-machine computer installations. He also analyzed
requirements for future command-and-control systems and
for logistic systems, in order to determine required
system functions and hardware size. He left Rand in
early 1974 to join the Software Research and Technology
Staff of TRW Systems Group where he is currently
developing improved techniques to specify the
requirements of computer software systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Browne:1975:AMP,
author = "J. C. Browne",
title = "An analysis of measurement procedures for computer
systems",
journal = j-SIGMETRICS,
volume = "4",
number = "1",
pages = "29--32",
month = jan,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041695.1041699",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper purports to be a partial record of the
remarks made by the author at a panel session sponsored
by SIGMETRICS at the 1974 ACM National Conference in
San Diego. Not all of the material covered in the talk
is included here, primarily because it appears in other
contexts or in the presentations of other speakers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Terplan:1975:COR,
author = "Kornel Terplan",
title = "Cost-optimal reliability of data processing systems",
journal = j-SIGMETRICS,
volume = "4",
number = "2",
pages = "1--12",
month = apr,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041701.1041702",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the advent of third generation computing systems,
the increase in complexity and power has reached a
degree which exceeds the human ability to understand,
to analyze, to predict, and to optimize system
performance and reliability. The only method that can
help is measurement. In defining measurement purposes,
one has to define which measurable quantities in the
system are significant and which may be ignored. But,
at the present time, we do not know in general what is
relevant in the measurements. For the sake of clarity,
it is useful to define several levels of measurement:
organizational level --- computer center level ---
computing system level --- job level --- computer
subsystem level.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Landwehr:1975:USM,
author = "Carl E. Landwehr",
title = "Usage statistics for {MTS}",
journal = j-SIGMETRICS,
volume = "4",
number = "2",
pages = "13--23",
month = apr,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041701.1041703",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The following report is presented in response to
Professor Browne's request for case studies of
performance measurement projects; this study takes a
macroscopic view of a large-scale time sharing and
batch processing installation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reddy:1975:EEM,
author = "Y. V. Reddy",
title = "Experimental evaluation of a multiprogrammed computer
system",
journal = j-SIGMETRICS,
volume = "4",
number = "2",
pages = "24--32",
month = apr,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041701.1041704",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports on the design and analysis of a
statistical experiment conducted on a `live' job stream
to determine the effect of segment size used for
storage allocation on the system performance.
Performance measures selected are turnaround time,
total cost and CPU utilization. The experiment consists
of one factor, the segment size, at five levels.
Uncontrolled factors such as EXCP's (number of I/O
starts) and core usage are included as covariates in
the analysis of variance. This experiment is part of a
continuing activity of Measurement, Evaluation and
Simulation. It is designed to provide data for
improving performance incrementally. The results of the
experiment provided an optimal segment size for the
given classing/scheduling algorithm and core-layout.
Design objectives and details of the analysis are also
presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhandarkar:1975:PAM,
author = "Dileep P. Bhandarkar",
title = "A practical application of memory interference
models",
journal = j-SIGMETRICS,
volume = "4",
number = "2",
pages = "33--39",
month = apr,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041701.1041705",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper briefly describes an approximate Markov
chain model for memory interference in a multiprocessor
system like C.mmp. The modeling assumptions explain the
level of abstraction at which the analysis is carried
out. Some empirical measurements are presented to
determine the model parameters for C.mmp. The analytic
results obtained from the model are compared with some
measured and simulation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bahr:1975:NFM,
author = "Dieter Bahr",
title = "A note on figures of merit",
journal = j-SIGMETRICS,
volume = "4",
number = "3",
pages = "1--3",
month = jul,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041707.1041708",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are different ways to compute figures of merit
(FOM). You may use Morris' [1] or Merrill's method [2]
or create any new one. But, in my opinion, that does
not answer the question whether these numbers are
nonsense or not.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boehm:1975:ICP,
author = "B. W. Boehm and T. E. Bell",
title = "Issues in computer performance evaluation: some
consensus, some divergence",
journal = j-SIGMETRICS,
volume = "4",
number = "3",
pages = "4--39",
month = jul,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041707.1041709",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper summarizes the results of an ACM/NBS
Workshop on Computer Performance Evaluation. Computer
Performance Evaluation (CPE) was selected as the
subject of an ACM/NBS Workshop because of the
significant leverage CPE activities can have on
computer usage. This paper describes a number of
conclusions abstracted from the discussions as well as
presenting recommendations formally adopted by the
participants. While several of these conclusions
indicate that improvements are needed in performance
analysis tools, another suggests that improved
application of CPE could be achieved by better
documentation of analysis approaches. More integration
of data collection and modeling is considered
necessary for the performance analysis field to develop
its full potential. Participants noted that the common
emphasis on data collection or modeling, to the
exclusion of considering objectives, often seriously
degrades the value of performance analyses; the only
savings that really count from a performance analysis
are the ones that appear on the bottom line of the
balance sheet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barber:1975:BC,
author = "Eric Ole Barber and Arne Asphjell and Arve Dispen",
title = "Benchmark construction",
journal = j-SIGMETRICS,
volume = "4",
number = "4",
pages = "3--14",
month = oct,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041711.1041712",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A partially automated method of generating benchmarks
for comparison of EXEC 8 with other systems has been
developed as one step in preparation for choosing a new
computer at the University of Trondheim.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marrevee:1975:MPP,
author = "J. P. Marrev{\'e}e",
title = "Measurements of the {Philips P1400} multiprogramming
system",
journal = j-SIGMETRICS,
volume = "4",
number = "4",
pages = "15--45",
month = oct,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041711.1041713",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of performance measurements have been made on
a Philips P1000 computer under its Multiprogramming
System (MPS) in a business applications environment.
All measurements were collected by software monitoring
programs which were developed with the following
objectives in mind: general applicability; minimum
overhead; and, as much as possible, independence of
Monitor releases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wright:1976:AET,
author = "Linda S. Wright and William A. Burnette",
title = "An approach to evaluating time sharing systems:
{MH-TSS} a case study",
journal = j-SIGMETRICS,
volume = "5",
number = "1",
pages = "8--28",
month = jan,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041716",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The authors conducted a benchmark measurement of the
Murray Hill Time Sharing System (MH-TSS) running on a
Honeywell 6000. The object of the test was to duplicate
the load normally present on the Murray Hill production
system, and measure the system's behavior before and
after a major software release and a major hardware
improvement. Five different load levels, from 30 to 90
users, were measured for each configuration. This paper
discusses the methods used in the design of the
experiment and in the analysis and interpretation of
the results. Several measurement tools were used in
this test. The event trace collection facility of
MH-TSS was used for the benchmark measurement and for
the design and fine tuning of a script representing the
normal load at Murray Hill. A commercially available
H6000-specific terminal simulator was used to feed
these scripts to the system. The batch background
system was loaded by a stream of synthetic jobs,
matched in resource usage characteristics to a set of
jobs chosen at random from the job stream of the
production system. The event trace data gathered at
various load levels under the three software and
hardware configurations were analyzed using two
techniques employing a state transition representation
of program behavior and system response. The result was
a set of data which documents the expected performance
improvements for the new software and hardware being
installed at Murray Hill, and which suggests the
expected growth potential for MH-TSS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "event trace; monitoring; operating systems; queuing
networks; response time; state transition models",
}
@Article{Calcagni:1976:SRK,
author = "John M. Calcagni",
title = "Shape in ranking {Kiviat} graphs",
journal = j-SIGMETRICS,
volume = "5",
number = "1",
pages = "35--37",
month = jan,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041717",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to address the topic of
ranking or comparing Kiviat Graphs. Several articles
have appeared on the subject. For background
information the reader is directed to the original
article by Philip Kiviat and Kenneth Kolence (1) and to
the articles on ranking by Barry Merrill (2, 4) and
Michael Morris. The main emphasis here will be on
showing how automatic inclusion of axis-value
normalizations and hence of pattern normalization can
be achieved. It is hoped that this will be one way of
making the ranking of Kiviat Graphs more meaningful and
hence more useful. Pattern recognition is, after all,
one of the main reasons for using the Kiviat Graph
technique.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eisenfeld:1976:IRH,
author = "J. Eisenfeld and David R. Barker and David J.
Mishelvich",
title = "Iconic representation of the human face with computer
graphics",
journal = j-SIGMETRICS,
volume = "5",
number = "1",
pages = "38--39",
month = jan,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041718",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are many applications for the iconic
representation of the human face. The program discussed
here was designed to describe the face by means of
measurements made on a skeletal radiograph and, in
particular, could be used to indicate changes resulting
from oral surgery. The computer generated faces are
drawn using a program modified by the authors which was
produced and kindly given to us by Mr Robert Jacob and
Dr William H. Huggins of the Johns Hopkins University.
Their program was based on that developed by Dr Herman
Chernoff (1) of Stanford University. The program was
originally designed for the presentation of
multivariate statistical data and was modified by Jacob
and Huggins for use in iconic communication. As a
result of our modifications, the mouth, nose, and
facial outline are presented more realistically, the
data input is interactive and quicker, especially when
only a few input variables are more directly related to
facial components to facilitate accuracy in drawing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nutt:1976:TCS,
author = "Gary J. Nutt",
title = "Tutorial: computer system monitors",
journal = j-SIGMETRICS,
volume = "5",
number = "1",
pages = "41--51",
month = jan,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041719",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The most important questions to be answered before
attempting to monitor a machine are {\em what\/} to
measure and {\em why\/} the measurement should be
taken. There is no general answer to these questions,
although a comprehensive set of considerations has been
discussed elsewhere. The following example indicates
some of the considerations involved. Suppose one is
interested in tuning a medium scale system which
utilizes virtual memory to support a batch
multiprogramming strategy. The nature of the job load
is a major factor in determining system performance;
the mix may be monopolized by I/O-bound jobs which use
very little processor time. In this case, the
bottleneck might be the mass storage system or the
peripheral devices. Resource utilization of the
peripheral devices may indicate bottlenecks at that
point; high mass storage utilization may not be
attributable only to the I/O operations, but may be
significantly influenced by the virtual memory
replacement policy. Processor utilization in this
system is also an insufficient measure for most
purposes, since the overhead time for spooling,
multiprogramming, and virtual memory may be unknown. A
more useful measurement for operating system policy
studies would quantify processor utilization for the
user as well as for each function of interest in the
operating system. From this example, one can see that
the variety of evaluation objectives and computer
systems causes the determination of what and why to be
largely a heuristic problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cotton:1976:SFP,
author = "Ira W. Cotton",
title = "Some fundamentals of price theory for computer
services",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "1--12",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041716",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The authors conducted a benchmark measurement of the
Murray Hill Time Sharing System (MH-TSS) running on a
Honeywell 6000. The object of the test was to duplicate
the load normally present on the Murray Hill production
system, and measure the system's behavior before and
after a major software release and a major hardware
improvement. Five different load levels, from 30 to 90
users, were measured for each configuration. This paper
discusses the methods used in the design of the
experiment and in the analysis and interpretation of
the results. Several measurement tools were used in
this test. The event trace collection facility of
MH-TSS was used for the benchmark measurement and for
the design and fine tuning of a scrint representing the
normal load at Murray Hill. A commercially available
H6000-specific terminal simulator was used to feed
these scripts to the system. The batch background
system was loaded by a stream of synthetic jobs,
matched in resource usage characteristics to a set of
jobs chosen at random from the job stream of the
production system. The event trace data gathered at
various load levels under the three software and
hardware configurations were analyzed using two
techniques employing a state transition representation
of program behavior and system response. The result was
a set of data which documents the expected performance
improvements for the new software and hardware being
installed at Murray Hill, and which suggests the
expected growth potential for MH-TSS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "event trace; monitoring; operating systems; queuing
networks; response time; state transition models",
}
@Article{Giammo:1976:DCP,
author = "Thomas Giammo",
title = "Deficiencies in computer pricing structure theory",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "13--21",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041717",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to address the topic of
ranking or comparing Kiviat Graphs. Several articles
have appeared on the subject. For background
information the reader is directed to the original
article by Philip Kiviat and Kenneth Kolence (1) and to
the articles on ranking by Barry Merrill (2, 4) and
Michael Morris. The main emphasis here will be on
showing how automatic inclusion of axis-value
normalizations and hence of pattern normalization can
be achieved. It is hoped that this will be one way of
making the ranking of Kiviat Graphs more meaningful and
hence more useful. Pattern recognition is, after all,
one of the main reasons for using the Kiviat Graph
technique.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kimbleton:1976:CPD,
author = "Stephen R. Kimbleton",
title = "Considerations in pricing distributed computing",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "22--30",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041718",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are many applications for the iconic
representation of the human face. The program discussed
here was designed to describe the face by means of
measurements made on a skeletal radiograph and, in
particular, could be used to indicate changes resulting
from oral surgery. The computer generated faces are
drawn using a program modified by the authors which was
produced and kindly given to us by Mr Robert Jacob and
Dr William H. Huggins of the Johns Hopkins University.
Their program was based on that developed by Dr Herman
Chernoff (1) of Stanford University. The program was
originally designed for the presentation of
multivariate statistical data and was modified by Jacob
and Huggins for use in iconic communication. As a
result of our modifications, the mouth, nose, and
facial outline are presented more realistically, the
data input is interactive and quicker, especially when
only a few input variables are more directly related to
facial components to facilitate accuracy in drawing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kiviat:1976:BRG,
author = "Philip J. Kiviat",
title = "A brief review of the {GAO} task group's
recommendations on management guidelines for pricing
computer services in the federal government",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "71--83",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041719",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The most important questions to be answered before
attempting to monitor a machine are {\em what\/} to
measure and {\em why\/} the measurement should be
taken. There is no general answer to these questions,
although a comprehensive set of considerations has been
discussed elsewhere. The following example indicates
some of the considerations involved. Suppose one is
interested in tuning a medium scale system which
utilizes virtual memory to support a batch
multiprogramming strategy. The nature of the job load
is a major factor in determining system performance;
the mix may be monopolized by I/O-bound jobs which use
very little processor time. In this case, the
bottleneck might be the mass storage system or the
peripheral devices. Resource utilization of the
peripheral devices may indicate bottlenecks at that
point; high mass storage utilization may not be
attributable only to the I/O operations, but may be
significantly influenced by the virtual memory
replacement policy. Processor utilization in this
system is also an insufficient measure for most
purposes, since the overhead time for spooling,
multiprogramming, and virtual memory may be unknown. A
more useful measurement for operating system policy
studies would quantify processor utilization for the
user as well as for each function of interest in the
operating system. From this example, one can see that
the variety of evaluation objectives and computer
systems causes the determination of what and why to be
largely a heuristic problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Morris:1976:PIP,
author = "Michael F. Morris",
title = "Problems in implementing and processing computer
charging schemes",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "84--88",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041744",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is important to point out at the beginning of this
presentation that we have strayed quite far from the
titled topic of our workshop --- `Pricing Computer
Services.' This makes my task much easier because I'm
not at all sure what `service' we get from computers
and `pricing' is seldom related in any economic sense
with the cost of production. Here we have really been
discussing `Charging for Computer Resource Usage.' I
will stay with the topic as we've been discussing it
rather than with the topic as I thought it should be.
To make the distinction clear between pricing services
and charging for resource usage I will relate a very
simple story from a recent newspaper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luderer:1976:CPM,
author = "Gottfried W. R. Luderer",
title = "Charging problems in mixed time-sharing\slash batch
systems: cross subsidization and invariant work units",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "89--93",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041745",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses two topics related to charging
for computing services in mixed timesharing/batch
systems. The first one is the problem of cross
subsidization between time-sharing and batch service. A
method is proposed which helps to avoid this
phenomenon. The second topic deals with the question of
helping the user to divide his work between
time-sharing and batch service based on charging
information. Basically, the approach is to define a
service-invariant computing work unit, which is priced
differently according to grade of service. Time-sharing
and batch are considered to be different grades of
service. The cost impact of moving work between
services can thus be more easily estimated. A method
for calculating grade-of-service factors from cost and
workload estimates is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Oatey:1976:STM,
author = "David J. Oatey",
title = "{SIGMETRICS} technical meeting on pricing computer
services",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "94--102",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041746",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This presentation will show how one large installation
actually does pricing of several on-line systems. This
is a `pricing in practice' example with the resultant
procedures, measures, and pricing determined by the
blending of several practical, political, and
theoretical influences.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gutsche:1976:UE,
author = "Richard H. Gutsche",
title = "User experience",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "103--107",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041747",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Security Pacific is the tenth largest bank in the
United States, operating 500 banking locations in the
State of California. Our Electronic Data Processing
Department serves the entire system from its Glendale
Operations Center and a satellite center in Hayward.
The Hayward location serves as an input/output center
for our Northern California banking offices. Data
Transmission provides for centralization of all
accounting functions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anonymous:1976:PC,
author = "Anonymous",
title = "Participant's choice",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "108--122",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041748",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During these two sessions, chaired by Richard Gutsche
of Security Pacific National Bank, a panel of experts
addressed specific pricing problems the participants
and attendees felt were important. The preliminary
questions that the panelists addressed included: $
\bullet $ What should be included in an overhead charge
and why? $ \bullet $ Should a computer center be
price-competitive with an outside market? $ \bullet $
Funding a computer center --- real or funny money?
$ \bullet $ What is an appropriate charging philosophy
for a paging environment?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luderer:1976:DCR,
author = "Gottfried W. R. Luderer",
title = "Defining a computer resource unit",
journal = j-SIGMETRICS,
volume = "5",
number = "2",
pages = "5--10",
month = apr,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041721.1041722",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A method for the construction of a resource component
charging formula for computer service in a
multiprogramming system is defined. Charges are
proportional to relative resource costs, to fractional
resource use with regard to total expected resource
usage, and the intent is to recover cost without profit
or loss. Further, a method is presented that simplifies
the treatment of overhead or unallocatable resource
costs. An aggregate `Computer Resource Unit' is
defined, which attempts to characterize workload in a
system-invariant way. Experiences with this concept and
its limitations are discussed. Recommendations for
those planning to introduce a similar concept are
given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer charging; overhead allocation; virtual time;
workload characterization",
}
@Article{Roehr:1976:PIT,
author = "K. Roehr and K. Niebel",
title = "Proposal for instruction time objectives",
journal = j-SIGMETRICS,
volume = "5",
number = "2",
pages = "11--18",
month = apr,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041721.1041723",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The designer of an instruction processing unit is
generally faced with the problem to implement a machine
able to execute a given instruction set within given
timing and cost constraints. A very common method to
state instruction timing constraints is by means of an
average instruction time",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Collins:1976:PIC,
author = "John P. Collins",
title = "Performance improvement of the {CP-V} loader through
use of the {ADAM} hardware monitor",
journal = j-SIGMETRICS,
volume = "5",
number = "2",
pages = "63--67",
month = apr,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041721.1041724",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ADAM hardware monitor can be used to localize and
identify several types of performance-impairing
behavior in user programs. This paper presents a case
study for such an improvement carried out on the CP-V
overlay loader. Through measurement of the execution
behavior and the subsequent analysis of the resulting
data, problems of three basic types were identified: 1.
The presence of inefficiently coded routines in areas
of high execution intensity; 2. The use of overly
general routines along heavily-used program paths; and
3. The use of inefficient algorithms for processing the
large amounts of data with which the loader deals. The
subsequent redesign and recoding of the problem areas
have resulted in a significant performance improvement:
the time required to load a program has been reduced by
a factor of between two and ten, dependent upon the
nature of the program and the loader options
specified.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandwajn:1976:SLI,
author = "A. Brandwajn",
title = "Simulation of the load of an interactive system",
journal = j-SIGMETRICS,
volume = "5",
number = "2",
pages = "69--92",
month = apr,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041721.1041725",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe a simulator of interactive users designed
for the resource sharing system ESOPE. We stress the
guide-lines of the design as well as the problems of
interface with the operating system, of measurements,
and of perturbations caused by the simulator in the
statistics gathered. We show two examples of an
application of the simulator to the design of a
resource-sharing system, viz., to an analysis of load
regulation policies, and to an evaluation of the
improvement in system performance one may expect from
implementing shared translators. Finally, we use the
load simulator to validate a mathematical model. The
latter is developed by step-wise refinement, using
measured values of model parameters, till a good
agreement between the performance indices computed from
our model and those measured in a real system under
simulated load, is obtained. It is observed that, for
most of the performance measures considered, a simple
model matches fairly well the `real world'.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coppens:1976:QER,
author = "G. W. J. Coppens and M. P. F. M. van Dongen and J. P.
C. Kleijnen",
title = "Quantile estimation in regenerative simulation: a case
study",
journal = j-SIGMETRICS,
volume = "5",
number = "3",
pages = "5--15",
month = "Summer",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041727.1041728",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We model key-punching in a computer center as a
queuing simulation with 2 servers (typists) and 3
priority classes (small, medium, large jobs). The 90\%
quantile of queuing time is estimated for different
borderlines between the 3 job classes. Confidence
intervals for the quantiles are based on the
regenerative properties of the simulation, as derived
by Iglehart (1974). They utilize the asymptotic
normality of the estimated quantile, and a rather
complicated expression for its variance. Numerical
results are given for the quantiles (and averages) of
the queuing times in each job class, for several
borderlines between the 3 job classes. The effects of
simulation runlength on the confidence intervals were
also examined. The effects of varying job-class
borderlines were tentatively modeled by a regression
model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Estell:1976:HFRa,
author = "Robert G. Estell",
title = "How fast is `real-time'?",
journal = j-SIGMETRICS,
volume = "5",
number = "3",
pages = "16--18",
month = "Summer",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041727.1041729",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A single bench mark test was compiled and run on the
AN/UYK-7 computer, and on a number of commercial
computers, in order to measure the relative throughput
of the UYK-7, which is the Navy's large scale real-time
computer. The results indicate the speeds and
accuracies of each host; however, general conclusions
can be drawn only with some risk.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mills:1976:SMC,
author = "Philip M. Mills",
title = "A simple model for cost considerations in a batch
multiprocessor environment",
journal = j-SIGMETRICS,
volume = "5",
number = "3",
pages = "19--27",
month = "Summer",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041727.1041730",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a simple model which provides a
procedure for estimating the effect of additional
hardware on run time. The additional hardware may be
additional processors, more powerful processors, an
increase in memory size or additional memory modules.
Run time is related to cost effectiveness. A measure of
memory interference in the form of effective processing
power is determined for multiprocessors and used in the
formulation of run time. The overall procedure allows
the user to compare different multiprocessor hardware
configurations on a cost effective basis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buchanan:1976:IBM,
author = "Irene Buchanan and David A. Duce",
title = "An interactive benchmark for a multi-user minicomputer
system",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "5--17",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041733",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The work that forms the basis for this paper was
undertaken as part of an exercise to purchase two
multi-user minicomputer systems to be developed as
interactive facilities for grant holders supported by
the Engineering Board of the United Kingdom Science
Research Council.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Estell:1976:HFRb,
author = "Robert G. Estell",
title = "How fast is `real-time'?",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "18--20",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041734",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A single bench mark test was compiled and run on the
AN/UYK-7 computer, and on a number of commercial
computers, in order to measure the relative throughput
of the UYK-7, which is the Navy's large scale real-time
computer. The results indicate the speeds and
accuracies of each host; however, general conclusions
can be drawn only with some risk.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rafii:1976:SPR,
author = "Abbas Rafii",
title = "Study of the performance of {RPS}",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "21--38",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041735",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The objective of this study is to evaluate the impact
of RPS (Rotational Position Sensing) on the response
time and utilization of multiple spindle disk drives
with a shared channel. Simulation models are used to
compare the effectiveness of the RPS scheme with the
systems without RPS capability. Analytical models for
the number of RPS rotation misses and the utilization
of the channel at the saturation point are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Price:1976:CQN,
author = "Thomas G. Price",
title = "A comparison of queuing network models and
measurements of a multiprogrammed computer system",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "39--62",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041736",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although there has been a substantial amount of work
on analytical models of computer systems, there has
been little experimental validation of the models. This
paper investigates the accuracy of the models by
comparing the results calculated using analytical
models with measurements of an actual system. Models
with and without overlapped seeks are compared. Also,
we show how a model can be used to help interpret
measurements of a real system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical models; performance measurement and
evaluation; queuing networks",
}
@Article{Buzen:1976:TTT,
author = "J. P. Buzen",
title = "Tuning: tools and techniques",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "63--81",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041737",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Tuning is basically a two stage process: the first
stage consists of detecting performance problems within
a system, and the second stage consists of changing the
system to correct these problems. Measurement tools
such as hardware monitors, software monitors and
accounting packages are typically used in the first
stage, and tools such as optimizers, simulators and
balancers are sometimes used in the second stage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spiegel:1977:WSA,
author = "Mitchell G. Spiegel",
title = "Workshop summary: `Applications of queuing models to
{ADP} system performance prediction'",
journal = j-SIGMETRICS,
volume = "6",
number = "1",
pages = "13--33",
month = "Winter",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1044829.1044830",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop was held on the Applications of Queuing
Models to ADP System Performance Prediction on 7-8
March 1977 at the National Technical Information
Service in Springfield, VA. Topics were divided into
four general areas: (1) Application of Queuing Models
to Feasibility and Sizing Studies, (2) Application of
Queuing Models to System Design and Performance
Management, (3) Queuing Model Validation and (4) New
Queuing Model Implementations. Mr Philip J. Kiviat,
Chairman, SIGMETRICS, made the welcoming remarks. As
Workshop Chairman, I provided a historical overview of
queuing model use which traced the development of the
application of queuing models to ADP system performance
prediction through the 20th century, while setting the
stage for each speaker's talk.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerman:1977:TWF,
author = "L. Hellerman",
title = "A table of work formulae with derivations and
applications",
journal = j-SIGMETRICS,
volume = "6",
number = "1",
pages = "35--54",
month = "Winter",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1044829.1044831",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Formulae for the work of certain common simple
computational steps are derived. The evaluation is in
terms of an information theoretic measure. The results
are then applied to evaluate the work of multiplication
and division, and the work of the IBM S/370 branch and
link instruction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Allen:1977:NES,
author = "R. C. Allen and S. R. Clark",
title = "A note on an empirical study of paging on an {IBM
370\slash 145}",
journal = j-SIGMETRICS,
volume = "6",
number = "1",
pages = "55--62",
month = "Winter",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1044829.1044832",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A summary is presented of the paging activity observed
for various programs executing on a System/370 model
145 using OS/VS1 (Release 2.0). Paging activity was
measured by periodic sampling of the queues involved in
real storage page management and by inspection of page
traffic counters maintained by the operating system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Morrison:1977:ASC,
author = "Robert L. Morrison",
title = "Abstracts from the 1977 {SIGMETRICS\slash CMG VIII}
conference",
journal = j-SIGMETRICS,
volume = "6",
number = "2",
pages = "3--21",
month = "Spring",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041750.1041751",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazos:1977:FDW,
author = "Constantine Lazos",
title = "Functional distribution of the workload of a linked
computer system and its simulation",
journal = j-SIGMETRICS,
volume = "6",
number = "3",
pages = "5--14",
month = "Summer",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041753.1041754",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:19 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consideration is given to a possible functional
distribution of the workload over two linked computers
with separate channel access to a large disc store,
and into the resource utilisation of the linked system
achieved by simulation using a modified and re-entrant
single processor simulator. Results suggest that the
proposed distribution realises a high utilisation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "compilation; disc channel traffic; hardware
utilisation; I/O buffers; in process; linked computer
system; multiprocessing; out process; simulation; trace
driven; work load",
}
@Article{Scheer:1977:COM,
author = "A.-W. Scheer",
title = "Combination of an optimization model for hardware
selection with data determination methods",
journal = j-SIGMETRICS,
volume = "6",
number = "3",
pages = "15--26",
month = "Summer",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041753.1041755",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:19 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The selection of an EDP configuration often fixes a
firm to a single manufacturer for a long time and the
capabilities of the chosen computer will continually
influence the firm's organization. Only a few approaches
exist to give assistance to the investors by developing
useful decision models based on the investment theory
/11, 12/. The hardware selection methods /4, 13/ used
up to now, like benchmark tests, don't meet these
demands. In this paper an investment model based on
mathematical programming is developed which considers
the aspects of investment for hardware selection.
Nevertheless, the present methods stay valid because
their output can be used as delta input for the
optimization model. Therefore, a concept is proposed
which combines these methods with an optimization
model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berinato:1977:AMT,
author = "Terence Berinato",
title = "An analytical model of a teleprocessing system",
journal = j-SIGMETRICS,
volume = "6",
number = "3",
pages = "27--32",
month = "Summer",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041753.1041756",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:19 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A queuing model has been developed to study the
performance and capacity of a casualty insurance
teleprocessing system. This paper presents the salient
features of the system itself, relates those features
to basic queuing theory algorithms, outlines the basic
model construction, and discusses the validation
results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chanson:1977:SSA,
author = "Samuel T. Chanson and Craig D. Bishop",
title = "A simulation study of adaptive scheduling policies in
interactive computer systems",
journal = j-SIGMETRICS,
volume = "6",
number = "3",
pages = "33--39",
month = "Summer",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041753.1041757",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:19 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, some work has been done in the area of
dynamically adaptive scheduling in operating systems
(i.e., policies that will adjust to varying workload
conditions so as to maximize performance) [4],[5],
[10], [11]. However, most studies deal with
batch-oriented systems only. The University of British
Columbia operates an IBM 370/168 running under MTS
(Michigan Terminal System) which is principally used
interactively. It has been known for some time that the
system is Input/Output bound. The main goal of this
work is to determine to what extent adaptive control,
particularly as related to processor scheduling, can
improve performance in a system similar to U.B.C.'s.
Simulation is used throughout the study and because of
this, the simulator and the workload are described in
some detail. The target machine is a somewhat
simplified version of the U.B.C. System.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ziegler:1977:DST,
author = "Kurt Ziegler",
title = "A data sharing tutorial",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "3--7",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041760",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial is intended to acquaint the reader with
the issues of DATA SHARING and to develop an
understanding of the implications of such facilities
for integrity, performance, and recovery.
Some future concerns are also discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scott:1977:PDP,
author = "Shirley E. Scott",
title = "Pricing {D.P.} products: a timesharing
implementation",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "8--12",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041761",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Periodically, vending Data Processing organizations
are faced with the task of establishing service rates
for resources provided to Customers. Sigmetrics'
Technical Meeting on Pricing Computer Services
(November, 1975) is a good indicator of the amount and
variety of interest the topic generates. The
proceedings from that meeting were a key source of
reference for the formulation and implementation of a
pricing strategy and automated model in one of Xerox's
timesharing data centers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sarzotti:1977:TTS,
author = "Alain Sarzotti",
title = "Transactional terminal system on micro-processor: a
method for identifying \& modeling overall
performance",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "13--22",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041762",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A typical banking, financial and administrative system
involves specific characteristics: a large number of
devices around a processor, with several different
kinds of work stations (displays, keyboards, printers,
badge and document readers \ldots{}), a heterogeneous
workload (by linkage of specialized micro-transactions
using local or remote files), versatile operating
facilities on displays for untrained administrative
personnel (form-loading on the display, selecting key
words, spotting errors, generating operational messages
\ldots{}), and working with several sets of typical
functions (savings operations, cheque accounting, fund
transfer, deposits, withdrawals, and mainly data
entry). In this case it was mandatory to approach the
system performance evaluation study by first building
and observing a typical workload model in the forecast
operating environment. Measurement steps were then
scheduled from outside to inside operating procedures
to get analysis from the user's point of view (a bank
teller's operations, for example). Then, overall
performance results were derived by direct measurement,
which established relationships between throughput,
response time, processor overhead, and space and time
parameters related to system behavior. That was done by
progressively increasing the number of terminals and
exercising the workload on two levels of technical and
functional saturation. Simultaneously, a simulation
model used the same description of the workload, and
after validation with the preceding direct measurement
results, was used to extend the previous relationships
on various systems. (The full range of Erlang
distribution parameters is assumed with unknown
servers; the trace-driven method was not possible.) The
final results are shown in tables and charts which
exhibit system boundaries, providing useful guidelines
for designing network stations and performing workload
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bazewicz:1977:UMP,
author = "Mieczyslaw Bazewicz and Adam Peterseil",
title = "Use of modelling in performance evaluation of computer
systems: a case of installations in the {Technical
University of Wroclaw}",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "22--26",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041763",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There is a number of models of user behaviour applied
in modelling studies on computer system performance
predictions. The models in most cases can be called
`resources-demands models', where users are only
considered as resources consumers. Some authors build
more sophisticated models --- concerning user
psychological features. The paper discusses some of the
users' models and their applicability in modelling and
design of operating systems for computers. Some
examples, drawn from research carried out at the
Technical University of Wroclaw on a complex users'
model and on the performance evaluation of operating
systems by simulation, are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Orchard:1977:NMC,
author = "R. A. Orchard",
title = "A new methodology for computer system data gathering",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "27--41",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041764",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many computer system monitoring, data gathering, and
reduction efforts ignore unbiased sampling techniques.
The approaches generally taken are expensive and can
make no scientifically based statement about the
accuracy of the data gathered or consequent data
reduction. The approach outlined in this paper attempts
to correct these inadequacies by using the theory of
random sampling. Several new techniques are introduced
for obtaining optimal error bounds for estimates of
computer system quantities obtained from random
samples. A point of view is taken (boolean variable
random sampling) which makes it unnecessary to have any
a priori knowledge of the population parameters of the
phenomena being sampled. It is expected that the
techniques introduced will significantly reduce
monitoring overhead for computer systems while
increasing the quality of the data gathered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "boolean random sampling; computer system monitoring;
data gathering",
}
@Article{Underwood:1978:HPE,
author = "Mark A. Underwood",
title = "Human performance evaluation in the use of federal
computer systems: recommendations",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "6--14",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041767",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has been increased awareness in recent years of
the high cost of non-hardware items in the Federal ADP
budget in contrast with decreasing costs for much of
the hardware. More attention is being given to software
development costs, systems design practices, automatic
program testing, and the like. Particular commercial
and military systems effectiveness and life cycle costs
now take into consideration such factors as part of the
planning process. It is suggested that not enough
attention has been given to measurement of human
performance variables as part of the systems
procurement and systems evaluation phases of Federal
ADP programs. Recommendations are made for the
incorporation of such measures along with conventional
hardware/software performance measurement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance; federal systems evaluations;
human performance measurements; psychology of computer
systems usage",
}
@Article{Jain:1978:GSA,
author = "Aridaman K. Jain",
title = "A guideline to statistical approaches in computer
performance evaluation studies",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "18--32",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041768",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anonymous:1978:PSQ,
author = "Anonymous",
title = "{Proceedings of the Software Quality and Assurance
Workshop}",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "32--32",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041769",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Honig:1978:DPA,
author = "Howard P. Honig",
title = "Data path analysis: analyzing large {I/O}
environments",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "34--37",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041770",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As data centers grow in complexity and size, vast
amounts of data (I/O) are transferred between
peripherals and CPU's. Data Path Analysis (DPA) is a
technique developed to report the utilization of CPU's,
channels, control units, and disks during data
transfer. Simply put, the technique analyzes data
paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sauer:1978:SRP,
author = "C. H. Sauer and E. A. MacNair",
title = "Simultaneous resource possession in queueing models of
computers",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "41--52",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041771",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Neglect of simultaneous resource possession is a
significant problem with queueing network models of
computers. This is illustrated by examples of memory
contention and channel contention with position sensing
I/O devices. A class of extended queueing networks is
defined to allow representation of simultaneous
resource possession. Extended queueing network models
of memory contention and channel contention are given.
Solution techniques and numerical results for these
models are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel contention; hierarchical decomposition; memory
contention; performance evaluation; queueing networks;
regenerative simulation; response time",
}
@Article{Pfau:1978:AQA,
author = "Pamela R. Pfau",
title = "Applied quality assurance methodology",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "1--8",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811092",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "What is the charter of a Quality Assurance (Q.A.)
department? What are the activities? How are they
undertaken? What is the impact of Quality Assurance
upon a software product? The structure and operating
philosophy of the department are explained in this
report as is the definition of the work cycle as
applied to a new release of a software product.
Comments are made about the interaction between
departments: product development, product maintenance,
publications, education, field support, product
management, marketing, product distribution and quality
assurance. While this is a description of the
activities of a company involved in developing and
marketing software products, the concepts apply to
techniques and practices which would also be beneficial
to any data processing department that develops
in-house application software.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bersoff:1978:SCM,
author = "Edward H. Bersoff and Vilas D. Henderson and Stan G.
Siegel",
title = "Software Configuration Management",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "9--17",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811093",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is about discipline. It is about discipline
that managers should apply to software development. Why
is such discipline needed? Quite simply because the
software industry has traditionally behaved in an
undisciplined manner --- doing its own thing. The
products that the industry has turned out have
typically: contained other than what was expected
(usually less, rather than more); been delivered much
later than scheduled; cost more than anticipated; and been
poorly documented. If you have been involved in any
of the situations quoted above, then this paper may be
of some help. In short, if you are now, or intend to
be, a software seller or buyer, then you should benefit
from an understanding of Software Configuration
Management. Lest you think that you are not now, or
ever will be, a software seller or buyer --- keep in
mind that the recent technology explosion in electronic
component miniaturization has placed the era of
personalized computing at hand. In that context, nearly
everyone may be considered a potential seller or buyer
of software. This paper is about the discipline called
Software Configuration Management (SCM). The objective
of SCM is to assist the software seller in achieving
product integrity and to assist the software buyer in
obtaining a product that has integrity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glass:1978:CFL,
author = "Robert L. Glass",
title = "Computing failure: a learning experience",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "18--19",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811094",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computing people can learn from failure as well as
success. Most professional papers deal only with the
latter \ldots{} yet it is well known that some of our
most lasting learning experiences are based on failure.
This paper is a lighthearted, anecdotal discussion of a
computing failure, with an underlying message that
sharing the sometimes embarrassing truths about What
Goes Wrong In Our Field is at least as illuminating as
more serious discussions about Things That Look
Promising. There are some necessary defense mechanisms
to be dealt with in discussing failure. People who have
failed in general do not want the world to know about
it. Perhaps even more so, companies which have failed
also do not want the world to know about it. As a
result, the content of this paper is fictionalized to
some extent. That is, company names and people names
are creations of the author, and there are
corresponding distortions in some story details.
However, the computing meat of the paper, the basis for
the failure learning experience, is untouched.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Woodmancy:1978:SQI,
author = "Donald A. Woodmancy",
title = "A Software Quality Improvement Program",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "20--26",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811095",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In late 1976, the NCR Corporation undertook a large
scale Quality Improvement Program for a major set of
systems software. That software set included some 103
separate products totaling 1.3 million source lines. It
included several operating systems, several compilers,
peripheral software, data utilities and
telecommunications handlers. This paper will describe
that effort and its results. The research and planning
that were done to define the program will be described.
The means by which the program was implemented will be
discussed in detail. Finally, some results of the
program will be identified.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fujii:1978:CSA,
author = "Marilyn S. Fujii",
title = "A comparison of software assurance methods",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "27--32",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811096",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several methods are currently employed by software
developers to improve software quality. This paper
explores the application of three of these methods:
quality assurance, acceptance testing, and independent
verification and validation. At first glance these
methods appear to overlap, but a closer evaluation
reveals that each has a distinct objective and an
established set of procedures. The purpose of this
paper is to clarify the role of each of these methods
by examining their scope, organization, and
implementation in the software development process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sukert:1978:EMA,
author = "Alan N. Sukert and Amrit L. Goel",
title = "Error modelling applications in software quality
assurance",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "33--38",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811097",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents the results of a two-phased
experiment conducted by Rome Air Development Center and
Syracuse University to demonstrate the potential
applicability of software error prediction models in
performing formalized qualification testing of a
software package. First, decisions based upon the
predictions of three software error prediction models
will be compared with actual program decisions for a
large command and control software development project.
Classical and Bayesian demonstration tests are used to
make accept/reject decisions about the software system.
Finally, the results of the two phases will be compared
and some conclusions drawn as to the potential use of
these predictive techniques to software quality
assurance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Duran:1978:TMP,
author = "Joe W. Duran and John J. Wiorkowski",
title = "Toward models for probabilistic program correctness",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "39--44",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811098",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Program testing remains the major way in which program
designers convince themselves of the validity of their
programs. Software reliability measures based on
hardware reliability concepts have been proposed, but
adequate models of software reliability have not yet
been developed. Investigators have recently studied
formal program testing concepts, with promising
results, but have not seriously considered quantitative
measures of the ``degree of correctness'' of a program.
We present models for determining, via testing, such
probabilistic measures of program correctness as the
probability that a program will run correctly on
randomly chosen input data, confidence intervals on the
number of errors remaining in a program, and the
probability that the program has been completely
tested. We also introduce a procedure for enhancing
correctness estimates by quantifying the error reducing
performance of the methods used to develop and debug a
program.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yin:1978:EUM,
author = "B. H. Yin and J. W. Winchester",
title = "The establishment and use of measures to evaluate the
quality of software designs",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "45--52",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811099",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It has been recognized that success in producing
designs that realize reliable software, even using
Structured Design, is intimately dependent on the
experience level of the designer. The gap in this
methodology is the absence of easily applied
quantitative measures of quality that ease the
dependence of reliable systems on the rare availability
of expert designers. Several metrics have been devised
which, when applied to design structure charts, can
pinpoint sections of a design that may cause problems
during coding, debugging, integration, and
modification. These metrics can help provide an
independent, unbiased evaluation of design quality.
These metrics have been validated against program error
data of two recently completed software projects at
Hughes. The results indicate that the metrics can
provide a predictive measure of program errors
experienced during program development. Guidelines for
interpreting the design metric values are summarized
and a brief description of an interactive structure
chart graphics system to simplify metric value
calculation is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pierce:1978:RTT,
author = "Robert A. Pierce",
title = "A Requirements Tracing Tool",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "53--60",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/800283.811100",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A software development aid termed the Requirements
Tracing Tool is described. Though originally designed
to facilitate requirements analysis and thus simplify
system verification and validation, it has also proven
useful as an aid for coping with changing software
requirements and estimating their consequent cost and
schedule impacts. This tool provides system analysts
with a mechanism for automated construction,
maintenance, and access to a requirements data base ---
an integrated file containing all types and levels of
system requirements. This tool was used during the
development of a large Navy undersea acoustic sensor
system. It is presently being used to support the
Cruise Missile Mission Planning Project. An outline
version of this tool is under development.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Davis:1978:RLP,
author = "Alan M. Davis and Walter J. Rataj",
title = "Requirements language processing for the effective
testing of real-time systems",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "61--66",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811101",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "GTE Laboratories is currently developing a trio of
software tools which automate the feature testing of
real-time systems by generating test plans directly
from requirements specifications. Use of the first of
these tools, the Requirements Language Processor (RLP),
guarantees that the requirements are complete,
consistent, non-ambiguous, and non-redundant. It
generates a model of an extended finite-state machine
which is used by the second tool, the Test Plan
Generator, to generate test plans which thoroughly test
the software for conformity to the requirements. These
test plans are supplied to the third tool, the
Automatic Test Executor, for actual testing. The RLP is
the subject of this paper. The primary goal of the RLP
is to provide the ability to specify the features of a
target real-time system in a vocabulary familiar to an
application-oriented individual and in a manner
suitable for test plan generation. The RLP produces a
document which can be easily understood by non-computer
personnel. It is expected that this document will
function as a key part of the ``contract'' between a
real-time system supplier and a customer. This document
must also serve as a springboard for the software
designers during their development of the actual
product. In addition to the requirements document, the
RLP also produces an augmented state transition table
which describes a finite state machine whose external
behavior is identical to the target real-time system as
defined by the specified requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peters:1978:RSR,
author = "Lawrence Peters",
title = "Relating software requirements and design",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "67--71",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811102",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Software development is a process which has evolved
into a number of phases. Although the names of the
phases and some of their characteristics differ from
contractor to contractor and customer to customer, the
functional similarities among sets of phases cannot be
ignored. The basic software development scenario
depicted by these phases starts with problem
identification and definition, requirements
specification, design, code, test, and installation and
maintenance. Although some ``smearing'' of one phase
activity into other(s) may occur, this represents the
basic flow. However, it is just that smearing which
occurs between requirements and design that we wish to
explore here. Identifying or defining problems and
solving problems are viewed by many to be separate,
distinguishable activities. They are complementary in
that one identifies what must be done (requirements)
while the other depicts how it will be done (design).
But software designers complain bitterly that
requirements are poorly defined while customers and
analysts often complain that the design is not
responsive to the problem(s) as they perceive it.
Somehow software designers end up discovering
previously unknown requirements and end up solving a
problem which is foreign to the customer. Is there a
workable mechanism to reduce this difficulty?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stavely:1978:DFU,
author = "Allan M. Stavely",
title = "Design feedback and its use in software design aid
systems",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "72--78",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/800283.811103",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is argued that software system designers would
benefit greatly from feedback about the consequences of
a proposed design if this feedback could be obtained
early in the development process. A taxonomy of
possible types of feedback and other design aids is
presented, and the capabilities of several existing
design aid systems are described relative to this
taxonomy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yoder:1978:NSC,
author = "Cornelia M. Yoder and Marilyn L. Schrag",
title = "{Nassi--Shneiderman} charts an alternative to
flowcharts for design",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "79--86",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811104",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years structured programming has emerged as
an advanced programming technology. During this time,
many tools have been developed for facilitating the
programmer's use of structured programming. One of
these tools, the Structured Flowcharts developed by I.
Nassi and B. Shneiderman in 1972, is proving its value
in both the design phase and the coding phase of
program development. Several programming groups in
System Products Division, Endicott, New York, have used
the Nassi--Shneiderman charts as replacements for
conventional flowcharts in structuring programs. The
charts have been used extensively on some projects for
structured walk-throughs, design reviews, and
education. This paper describes the Nassi--Shneiderman
charts and provides explanations of their use in
programming, in development process control, in
walk-throughs, and in testing. It includes an analysis
of the value of Nassi--Shneiderman charts compared to
other design and documentation methods such as
pseudo-code, HIPO charts, prose, and flowcharts, as
well as the authors' experiences in using the
Nassi--Shneiderman charts. The paper is intended for a
general data processing audience and although no
special knowledge is required, familiarity with
structured programming concepts would be helpful. The
reader should gain insight into the use of
Nassi--Shneiderman charts as part of the total
development process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Benson:1978:SQA,
author = "J. P. Benson and S. H. Saib",
title = "A software quality assurance experiment",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "87--91",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/800283.811105",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An experiment was performed to evaluate the ability of
executable assertions to detect programming errors in a
real time program. Errors selected from the categories
of computational errors, data handling errors, and
logical errors were inserted in the program. Assertions
were then written which detected these errors. While
computational errors were easily detected, data
handling and logical errors were more difficult to
locate. New types of assertions will be required to
protect against these errors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Assertions; Error categories",
}
@Article{Bauer:1978:AGE,
author = "Jonathan Bauer and Susan Faasse and Alan Finger and
William Goodhue",
title = "The automatic generation and execution of function
test plans for electronic switching systems",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "92--100",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811106",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A three phase functional testing methodology is
described for use in the development cycle of
electronic switching systems. The methodology centers
on a directed graph model of the system and provides
for the checking of system requirements, the generation
of functional tests and the automatic execution of
these tests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Martin:1978:SAT,
author = "K. A. Martin",
title = "Software acceptance testing that goes beyond the
book",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "101--105",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/800283.811107",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The design of software acceptance tests is as
important to meeting contract goals as is the design of
algorithms. This statement is particularly significant
on fixed price contracts with tight schedules. An
extreme instance of the demand placed on acceptance
testing can be found in software projects wherein the
only rigorous testing that required the Computer
Program Configuration Item (CPCI) to exercise its
repertoire of load and store instructions was the
Formal Qualification Test (FQT). This paper is about
such a project, the lessons learned from it, and
provides an effective test approach for fixed price
contracts. A word or two about the project is
appropriate to establish the context that underscores
the impact of the above assertion. Initially 30K (core
words), 16-bit program instructions were to be
developed within one year using a Varian 73 computer
with 32K words of memory for a Command and Control
application under a fixed price contract. A set of a
priori conditions existed that tended to convey the
impression that the inherent risks of this endeavor
were reasonable. They were the ``facts'' that: Of the
30K (core words) to be written, 30\% of this code
already existed and would be used. Contractor standards
would be allowed for documentation with limited use of
Military Specifications. No formal Design Reviews or
audits would accompany the deliverable CPCI. Existent
executive software would suffice. A competent and
enthusiastic team was committed to the effort.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Drasch:1978:ITP,
author = "Frederick J. Drasch and Richard A. Bowen",
title = "{IDBUG}: a tool for program development",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "106--110",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811108",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The construction of a reliable computer program
requires, in part, a means of verification of its
component parts prior to their integration into the
overall system. The verification process may consist of
building a test harness to exercise or exhaustively
test a procedure. This technique is known as dynamic
testing. In practice, the application of dynamic
testing requires the coding of a special harness for
each procedure. This consumes valuable programming
time, as much as 50\% of the total effort (FAIR78). It
is also restrictive because the test harness cannot be
easily modified to test aspects of a program which it
was not originally designed to test. We have built a
facility called IDBUG that reduces the programming
effort required to employ dynamic testing by automating
the construction of the test harness. Additionally, it
provides an interactive test environment which permits
more flexible testing. This paper describes IDBUG and
discusses our experience in its application to
maintenance tasks in a commercial environment. None of
the ideas put forth here will be especially novel;
dynamic testing as a software testing tool has been in
use for some time. What we hope to do is illustrate the
beneficial aspects of a particular application of
dynamic testing. It is argued that testing should play
a more limited role in assuring the reliability of
software in light of techniques such as structured
coding, top-down design, proof of correctness, etc.
(McG075). While it is true that eventually the ``art of
computer programming'' will become the ``science of
producing correct programs'', we believe that more
emphasis must be placed on interim solutions to aid in
the construction of reliable software. We present IDBUG
as such a solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stickney:1978:AGT,
author = "M. E. Stickney",
title = "An application of graph theory to software test data
selection",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "111--115",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811109",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Graph theory is playing an increasingly important role
in the design, analysis, and testing of computer
programs. Its importance is derived from the fact that
flow of control and flow of data for any program can be
expressed in terms of directed graphs. From the graph
representing the flow of control, called the program
graph, many others can be derived that either partially
or completely preserve the program control structure.
One derived graph known as a cyclomatic tree is of
particular value in program testing. It is so named
because the number of leaves of the tree is equal to
the cyclomatic number of the program graph. A thorough
treatment of cyclomatic numbers is provided in [3]. A
program called the Complexity/Path Analyzer (CPA) has
been developed that builds and utilizes a program
cyclomatic tree to provide test planning information,
automatically place software counters called probes as
discussed in [9] and [10] in a program, and provide
selected parameters such as program length and program
graph cyclomatic number. The paper discusses the
features and derivation of cyclomatic trees as well as
their value and application to testing and test data
generation. A cyclomatic tree provides a test planner
with information useful for planning program tests. In
particular, it furnishes test data selection criteria
for developing tests that are minimally thorough as
defined by Huang in [9]. A test data selection
criterion will be defined as minimally thorough if any
complete test with respect to the criterion is at least
minimally thorough. The term complete is used as
defined by Goodenough and Gerhart in [13]. A test is
defined to be a non empty sequence of test cases. Each
test case consists of an element selected from the
input domain of the program being tested. The paper
discusses the merits of one particular technique
selected to achieve a minimally thorough test data
selection criteria. Part of the technique is automated
by the CPA program.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fischer:1978:SQA,
author = "Kurt F. Fischer",
title = "Software quality assurance tools: {Recent} experience
and future requirements",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "116--121",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811110",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The objective of software quality assurance (QA) is to
assure sufficient planning, reporting, and control to
affect the development of software products which meet
their contractual requirements. To implement this
objective, eight QA functions can be identified: (1)
initial quality planning; (2) development of software
standards and procedures; (3) development of quality
assurance tools; (4) conduct of audits and reviews; (5)
inspection and surveillance of formal tests; (6)
configuration verifications; (7) management of the
discrepancy reporting system; and (8) retention of QA
records. The purpose of this paper is to document experiences
gained in the use of selected QA tools that perform
some of the above functions, to discuss lessons
learned, and to suggest future needs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glasser:1978:ESC,
author = "Alan L. Glasser",
title = "The evolution of a {Source Code Control System}",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "122--125",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811111",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Source Code Control System (SCCS) is a system for
controlling changes to files of text (typically, the
source code and documentation of software systems). It
is an integral part of a software development and
maintenance system known as the Programmer's Workbench
(PWB). SCCS has itself undergone considerable change.
There have been nine major versions of SCCS. This paper
describes the facilities provided by SCCS, and the
design changes that were made to SCCS in order to
provide a useful and flexible environment in which to
conduct the programming process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Josephs:1978:MCB,
author = "William H. Josephs",
title = "A mini-computer based library control system",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "126--132",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811112",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the major problems encountered in any large
scale programming project is the control of the
software. Invariably, such large programs are divided
into many smaller elements since these are easier to
code, test and document. However, such a division adds
new complexity to the task of Configuration Management
since the many source modules, data base elements, JCL
(Job Control Language) and DATA files must be
controlled with the goal of maximizing program
integrity and minimizing the chances of procedural
errors. Furthermore, whenever any program is released
either for field test or for final production, an
entire change control procedure must be implemented in
order to trace, install, debug and verify fixes or
extensions to the original program. These maintenance
activities can account for up to 80 percent of the
entire programming cost in a large, multi-year project.
The library control program (SYSM) presented here was
developed to aid in these processes. It has facilities
for capturing all elements of a program (commonly
called baselining), editing any element or group of
elements that have been baselined to build an updated
version of the program, adding and/or deleting elements
of a program, and listing the current contents of a
given element or elements. SYSM is written mainly in
FORTRAN, and runs on a Hewlett--Packard HP-21MX
computer with two tape drives, the vendor supplied
RTE-II or RTE-III operating system, and at least 16K of
user available core. It can be used to control code
targeted for either the HP-21MX itself, or, using the
optional HP/LSI-11 link program, code targeted for a
Digital Equipment Corp. LSI-11 system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cavano:1978:FMS,
author = "Joseph P. Cavano and James A. McCall",
title = "A framework for the measurement of software quality",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "133--139",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811113",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Research in software metrics incorporated in a
framework established for software quality measurement
can potentially provide significant benefits to
software quality assurance programs. The research
described has been conducted by General Electric
Company for the Air Force Systems Command Rome Air
Development Center. The problems encountered in defining
software quality and the approach taken to establish a
framework for the measurement of software quality are
described in this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cobb:1978:MSU,
author = "Gary W. Cobb",
title = "A measurement of structure for unstructured
programming languages",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "140--147",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811114",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Software Science is a field of Natural Science which
deals with the development of measurements which reveal
properties of software programs. These measurements are
qualified as to their degree of correlation with the
ability of human beings to construct or understand a
subject program. Maurice Halstead has pioneered much of
the theory in this field ((5) through (10)), which
applies statistical and psychological testing
techniques to the evaluation of the measurements. The
basic inputs to the Halstead predictors are easily
measured: the number of distinct operators and
operands, and the number of occurrences of the
operators and operands. Due to the statistical nature
of the measurements, there can be erroneous results
when applying them to small sample spaces. However, the
predictors are very adequate when applied to large
samples, that is, large software systems. In an
excellent review article by Fitzsimmons and Love (4),
it is pointed out that several of the estimators
defined by Halstead assumed that the subject programs
were well-structured, and inaccuracy in the predictors
can result if they are applied to `unpolished'
programs. In fact, Halstead qualified six classes of
impurities in code which can cause the length predictor
to be inaccurate. The definition of volume for
software, another predictor introduced in Halstead's
book, is related to the level of the specification of
the program. An algorithm which is written in assembly
language will have a greater volume than the same
algorithm written in Pascal, due to the richness of the
semantic constructs that are available in the
higher-level languages. Hence, this predictor is
language dependent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bowen:1978:CAS,
author = "John B. Bowen",
title = "Are current approaches sufficient for measuring
software quality?",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "148--155",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811115",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Numerous software quality studies have been performed
over the past three years, mostly sponsored by the Rome
Air Development Center. It is proposed by the author
that more emphasis should be placed on devising and
validating quantitative metrics that are indicative of
the quality of software when it is being designed and
coded. Such measures could be applied effectively as
relative guidelines without formal validation. However,
for such measures to be predictive of the quality of
the delivered software, they must be validated with
actual operational error data or data gathered in a
simulated operational environment. This paper includes
a review of proposed metrics from the literature, a
report of a Hughes intramodule metric study, and
recommendations for refining proposed software quality
assurance criteria.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lockett:1978:UPM,
author = "Joann Lockett",
title = "Using performance metrics in system design",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "156--159",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811116",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Complexities of system design are great and often lead
designers to be inward looking in their analyses.
Knowledge from various fields can be of benefit in
designing systems [1]. Management accountants can
describe economic effects of delays in closing
schedules, psychologists can provide significant
insights into the behavioral responses of users to
complex command syntax, and computer performance
analysts can provide alternatives to describe and to
measure responsiveness of systems. Even in the case of
an innovative system design, the designer can employ
such approaches to identify incipient problems and
create alternatives with increased cost effectiveness.
This paper describes how performance metrics can be
used effectively to support system design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Southworth:1978:RM,
author = "Richard N. Southworth",
title = "Responding to {MIL-S-52779}",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "160--164",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811117",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The art and science of computer software development
is still changing considerably from year to year, and
therefore lacks the established control mechanisms of
hardware production programs. Also, because most
software is produced in a one-time development program,
it does not lend itself to the established discrepancy
detection and correction techniques used in hardware
production programs. Consequently, the software QA
program must provide the methodology to detect a
deficiency the first time it occurs and effect
corrective action. MIL-S-52779: ``Software Quality
Assurance Program Requirements,'' has provided a much
needed impetus for software development contractors to
develop software QA techniques. But much remains to be
done. As the state of the art advances, MIL-S-52779
should be revised accordingly. In this paper the author
responds to the present form of the specification,
suggests some revisions and additions, and briefly
discusses a set of QA procedures that should be
responsive to (fully compliant with) MIL-S-52779.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tighe:1978:VPS,
author = "Michael F. Tighe",
title = "The value of a proper software quality assurance
methodology",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "165--172",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/800283.811118",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the experiences of a project
development team during an attempt to ensure the
quality of a new software product. This product was
created by a team of software engineers at Digital
Equipment Corporation, a mainframe manufacturer. As a
result, the definition of ``to ensure the quality of a
software product'' meant minimizing the maintenance
costs of the new product. Ease of maintenance and a low
bug rate after release to the customer were very
important goals from the beginning of the project. This
paper compares the degree of application and resultant
effects of several software quality assurance
methodologies upon different parts of the final
product. Many of the product's subsystems were created
using all of the discussed methodologies rigorously.
Some subsystems were created with little or no use of
the methodologies. Other subsystems used a mixture. The
observed quality of the various subsystems when related
to the methodology used to create them provides
insights into the interactions between the
methodologies. These observations also supply
additional experience to reinforce established beliefs
concerning the value of quality assurance
methodologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Belford:1978:QEE,
author = "Peter Chase Belford and Carlo Broglio",
title = "A quantitative evaluation of the effectiveness of
quality assurance as experienced on a large-scale
software development effort",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "173--180",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811119",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of quality assurance on software projects
is to achieve high quality products on schedule, within
cost, and in compliance with contract requirements.
However, historically, the effectiveness of these
activities on software projects has not been
quantitatively demonstrable because of a lack of data
collected on the project combined with a lack of
insight into the operational reliability of the system.
Quality assurance is a collection of activities on a
contractual deliverable whose purpose is to impart a
degree of confidence that the deliverable will conform
to the customer's concept of what was procured. Under
these conditions, quality assurance must be performed
with respect to a documented baseline of the concept.
This baseline can address the need in the form of
requirement statements; the conceptual approach to be
followed in the form of a functional specification; or
the design to be implemented in the form of a design
specification. Further, these baselines are
hierarchical in the sense that when quality assurance
is applied to a level it is implicitly applied to all
lower levels; e.g., if the need is to be satisfied, the
conceptual approach must be satisfied. Effective
quality assurance programs impart a high degree of
confidence to the customer without significant impacts
on schedule or cost. Historically, this effectiveness
has not been quantitatively demonstrable because of a
lack of data collected on the project combined with a
lack of insight into the operational reliability of the
system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kacik:1978:ESQ,
author = "Paul J. Kacik",
title = "An example of software quality assurance techniques
used in a successful large scale software development",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "181--186",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811120",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Development of the software package for the Combat
Grande Air Defense System was considered by the Hughes
Aircraft Company to be highly successful in that a
reliable system was produced that met customer
requirements and was completed within time and budget
allocations --- a feat not often attained in large
scale software developments. Much of the success can be
attributed to the software quality assurance (QA)
techniques used. Some of these QA techniques are listed
in Table 1 along with the phases in which they were
used. This paper describes these QA techniques in some
detail, as well as those aspects of the system and
software development program that permitted these
techniques to be used effectively. Background
information is presented first which describes the
system, software, organization and software
configuration management. This is followed by a
description of the three major phases of software
development. The overall results are then presented,
followed by recommended improvements and conclusions.
Many of the QA techniques listed in Table 1 were used
in several phases of software development. However, a
particular technique is discussed only in the phase in
which it was most extensively used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kreutzer:1979:CSM,
author = "Wolfgang Kreutzer",
title = "Computer system modelling and simulation",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "9--35",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041854",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To evaluate the suitability and limitations of
software for computer systems modelling, a basic
comprehension of the structure of such tools must be
provided. A brief discussion of conceptual requirements
for the description of discrete models, and computer
system models in particular, is followed by a survey of
commercially available computer simulation packages.
Special and general purpose discrete event simulation
and general purpose programming languages are also
analysed for their suitability for this class of
applications. The survey closes with some
recommendations and guidelines for selection and
application of computer system simulation tools. To aid
the analyst contemplating a computer system modelling
project, a brief list of relevant addresses and
annotated references is also included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Turner:1979:ISM,
author = "Rollins Turner",
title = "An investigation of several mathematical models of
queueing systems",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "36--44",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041855",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of simple mathematical models were used to
predict average response time of a timesharing system.
The target system was a very simple trace driven
simulation model, but the workloads were trace files
obtained from a real system in normal operation. As
such, the workloads were characterized by very high
coefficients of variation in resource demands and think
times. Mathematical models of the system included
independent arrival models (M/M/1 and M/G/1), closed
network models admitting product form solutions, and a
more general Markov model. Only the final model
produced reasonable accuracy. A number of experiments
were performed, in an effort to determine what
properties of the system being modeled were responsible
for the failure of all the simple mathematical models.
The large variance in CPU time and the fact that the
system was a closed network were found to be critical
factors, and appeared to be the major causes for
failure of models that do not take them into account.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sauer:1979:CIQ,
author = "Charles H. Sauer",
title = "Confidence intervals for queueing simulations of
computer systems",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "45--55",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041856",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Simulation models of computer systems may be
formulated as queueing networks. Several methods for
confidence interval estimation for queueing simulations
are discussed. Empirical studies of these methods are
presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kleijnen:1979:NCS,
author = "Jack P. C. Kleijnen",
title = "A note on computer system data gathering",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "56--56",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041857",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently Orchard (1977) proposed a statistical
technique for data collection in computer systems. A
main idea was the use of random sampling, as opposed to
traditional fixed periodic sampling. He further
proceeded to derive confidence intervals for the
resulting estimator. He also proposed the use of binary
(Boolean) variables, e.g., $ q_{it} = 1 $ (or $0$) if
at sampling time $t$ the $i$th `slot' of a queue is
occupied (or empty, respectively).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajaraman:1979:PPV,
author = "M. K. Rajaraman",
title = "Performance prediction of a virtual machine",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "57--62",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041858",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modeling and simulation of computer systems have two
main objectives. First, to evaluate the performance of
a given configuration of a machine and second, to
derive a mechanism for prediction of performance when
configuration parameters change. This paper addresses
the second issue and reports the result of a recent
investigation of a Virtual Memory Computer. The results
indicate which variables or combination of variables
have significant effect on the performance and which do
not.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jain:1979:GSA,
author = "Aridaman K. Jain",
title = "A guideline to statistical approaches in computer
performance evaluation studies",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "63--77",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041859",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schwartz:1979:DCC,
author = "E. Schwartz",
title = "Development of credible computer system simulation
models",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "78--95",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041860",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problems encountered during a simulation effort
are influenced by the objectives of the simulation.
Verification and validation of the simulation model are
two such problems which affect the credibility (and
usability) of the model. A simulation methodology for
Program Design Analysis is described. The goal of this
simulation application is to test a design before it is
implemented. Techniques are described which enhance the
credibility of simulation models. The relationship
between Program Design Analysis and the reliability of
the system being developed is explored.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clark:1979:CPE,
author = "Jon D. Clark and Thomas J. Reynolds and Michael J.
Intille",
title = "Computer performance evaluation: an empirical
approach",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "97--101",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041861",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance evaluation can be delineated into
the areas of selection, projection and monitoring. The
tuning of existing systems for efficient performance
may be viewed as a special case of the projection
activity involving modeling, statistics collection and
analysis. Most tools available today are expensive to
use and overly complicated. This paper presents a
comparison of two relatively simple and cost-effective
statistical techniques for performance
evaluation: regression and canonical analysis. In
addition, the results of the suggested and implemented
computer configuration modification are reported.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "canonical analysis; computer performance evaluation;
multi-processor; regression analysis",
}
@Article{Willis:1979:TSW,
author = "Ron Willis",
title = "Techniques in simulation which enhance software
reliability",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "102--115",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041862",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A simplified simulation study of an actual software
development effort is presented. A model is developed
and exercised through various stages of modifications
to an originally unreliable software design until a
viable software design results. Techniques in model
development, simulation, analysis, and language
capability which lead to enhanced software reliability
are discussed. Unique aspects of the approach presented
are contrasted with simulation methods which lack this
capability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blake:1979:TSM,
author = "Russ Blake",
title = "{Tailor}: a simple model that works",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "1--11",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805444",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Tailor is an atomic model of the Tandem/16
multiple-computer system. Atomic modeling is based on
operational analysis and general considerations from
queueing theory. Measurements of system atoms define
the underlying components of processor usage. The
workload is described to the model through a separate
set of measurable parameters that comprise the workload
atoms. Simple formulae from operational analysis are
then applied to predict the amount of equipment
necessary to support the projected application.
Tailor's accuracy was tested under two very different
workloads. For both a large backend database
application and a program development system, Tailor
was able to predict the equipment needed to handle the
workloads to within 5 percent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Strecker:1979:ACP,
author = "William D. Strecker",
title = "An analysis of central processor-input-output
processor contention",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "27--40",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805445",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most computer systems have separate central (CPU) and
input-output (IOP) processors to permit simultaneous
computation and input-output (I/O). It is conventional
in such systems to avoid any loss of I/O data by
granting the IOP priority over the CPU for memory
service. Although this priority discipline is simple to
implement it may result in a maximum degradation of CPU
performance. In this discussion an analysis of the IOP
priority discipline is given together with an analysis
of other priority disciplines which require the
buffering of IOP requests and results are given showing
that only a small amount of buffering is required to
produce a noticeable improvement in CPU performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Contention; CPU; I/O interference; Input-output;
Memory system; Priority discipline; Processor",
}
@Article{Wiecek:1979:PST,
author = "Cheryl A. Wiecek and Simon C. {Steely, Jr.}",
title = "Performance simulation as a tool in central processing
unit design",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "41--47",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805446",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance analysis has always been considered
important in computer design work. The area of central
processing unit (CPU) design is no exception, where the
successful development of performance evaluation tools
provides valuable information in the analysis of design
tradeoffs. Increasing integration of hardware is
producing more complicated processor modules which add
to the number of alternatives and decisions to be made
in the design process. It is important that these
modules work together as a balanced unit with no hidden
bottlenecks. This paper describes a project to develop
performance simulation as an analysis tool in CPU
design. The methodology is first detailed as a three
part process in which a performance simulation program
is realized that executes an instruction trace using
command file directions. Discussion follows on the
software implemented, applications of this tool in CPU
design, and future goals.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bennett:1979:SDS,
author = "David A. Bennett and Christopher A. Landauer",
title = "Simulation of a distributed system for performance
modelling",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "49--56",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805447",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A distributed system of cooperating minicomputers is
simulated by AIMER (Automatic Integration of Multiple
Element Radars) to model and analyze the behavior of a
radar tracking system. Simulation is applied in the
AIMER project in an attempt to model a network of
minicomputers to discover a maximally flexible network
architecture. Because building the tracking system out
of real hardware would not result in a flexible enough
testbed system, the proposed configuration is
represented by a software emulation. The instruction
sets of the individual processors are emulated in order
to allow separation of the measurement facilities from
the execution of the system. The emulation is supported
by a Nanodata QM-1 micro- and nano-programmable host.
Extensive performance monitoring hooks have been built
into the emulation system which allow small performance
perturbations to become visible. The tracking network
is controlled by a combination firmware operating
system and a special emulated virtual control machine.
The tracking algorithms run on virtual machines whose
instruction sets and computational throughput can be
parameterized when the model is generated, or
dynamically by an operator during a run. The radar and
ground truth environments for the tracking system are
simulated with logic resident in one of the emulated
machines, allowing these functions to be monitored as
accurately as the tracking algorithms. The use of this
simulation technique has resulted in an extremely
flexible testbed for the development of distributed
radar tracking system models. The testbed itself can be
quickly tailored to other application problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazowska:1979:BTA,
author = "Edward D. Lazowska",
title = "The benchmarking, tuning and analytic modeling of
{VAX\slash VMS}",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "57--64",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805448",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a recent experience in
benchmarking, tuning and modelling Digital Equipment
Corporation's VMS executive running on their VAX-11/780
computer. Although we emphasize modelling here, the
three aspects are closely interrelated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marshall:1979:AMW,
author = "William T. Marshall and C. Thomas Nute",
title = "Analytic modelling of ``working set like'' replacement
algorithms",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "65--72",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805449",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although a large amount of theoretical work has been
performed in the analysis of the pure working set
replacement algorithm, little has been done applying
these results to the approximations that have been
implemented. This paper presents a general technique
for the analysis of these implementations by analytic
methods. Extensive simulations are reported which
validate the analytic model and show significant
simplifications that can be made with little loss of
accuracy. The problem of choosing memory policy
parameter values is examined and related in a simple
way to the choice of a working set window size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Briggs:1979:EBM,
author = "Fay{\'e} A. Briggs",
title = "Effects of buffered memory requests in multiprocessor
systems",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "73--81",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805450",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A simulation model is developed and used to study the
effect of buffering of memory requests on the
performance of multiprocessor systems. A multiprocessor
system is generalized as a parallel-pipelined processor
of order $ (s, p) $, which consists of $p$ parallel
processors each of which is a pipelined processor with
$s$ degrees of multiprogramming, so that there can be up
to $ s \times p $ memory requests in each instruction cycle. The
memory, which consists of $ N ( = 2^n)$ identical
memory modules, is organized such that there are $ \ell
( = 2^i)$ lines and $ m ( = 2^{n - i})$ identical
memory modules, where each module is characterized by
the address cycle (address hold time) and memory cycle
of $a$ and $c$ time units respectively. Too large an $
\ell $ is undesirable in a multiprocessor system
because of the cost of the processor-memory
interconnection network. Hence, we will show how
effective buffering can be used to reduce the system
cost while effectively maintaining a high level of
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raffi:1979:ECB,
author = "Abbas Raffi",
title = "Effects of channel blocking on the performance of
shared disk pack in a multi-computer system",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "83--87",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805451",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a multi-computer environment where several
computers share packs of disk drives, the architecture
of the disk controller can have significant effect on
the throughput of the disk pack. In a simple
configuration a controller can allow access to only one
disk in the pack at a time, and effectively block other
channels from accessing other disks in the pack. A
desirable alternative is to be able to access different
disks of the same pack simultaneously from different
channels. Motivated by the presence of mixed hardware
in an installation to support both configurations, an
attempt is made to model each system and produce
analytical and simulation results to compare their
relative performances. It is predicted that under the
prevalent conditions in the installation, a complete
switchover to either system should not give rise to
significant performance change.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zahorjan:1979:ESM,
author = "John Zahorjan",
title = "An exact solution method for the general class of
closed separable queueing networks",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "107--112",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805452",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a convolution algorithm for
the full class of closed, separable queueing networks.
In particular, the algorithm represents an alternative
method to those already known for the solution of
networks with class changes, and is the first efficient
algorithm to deal with Lam-type networks [11]. As an
application of the algorithm, we study a simple
queueing network with disk I/O devices connected to a
single CPU through a single channel. The algorithm is
then used to develop a simple, accurate approximation
for the blocking of disk devices that takes place when
a customer using a disk is waiting for or in service at
the channel.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kienzle:1979:SAQ,
author = "Martin G. Kienzle and K. C. Sevcik",
title = "Survey of analytic queueing network models of computer
systems",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "113--129",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805453",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of case studies involving the use of queueing
network models to investigate actual computer systems
are surveyed. After suggesting a framework by which
case studies can be classified, we contrast various
parameter estimation methods for specifying model
parameters based on measurement data. A tabular summary
indicates the relationships among nineteen case
studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Landry:1979:SEP,
author = "Steve P. Landry and Bruce D. Shriver",
title = "A simulation environment for performing dataflow
research",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "131--139",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805454",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dataflow languages and processors are currently being
extensively studied because of their respective ability
to specify and execute programs which exhibit a high
degree of parallel and/or asynchronous activity [12,
7]. This paper describes a comprehensive simulation
environment that allows for the execution and
monitoring of dataflow programs. One overall objective
of this facility was to meet the needs of researchers
in such diverse areas as computer architecture,
algorithm analysis, and language design and
implementation. Another objective was to accommodate
the semantics of several of the contending abstract
dataflow models [2, 4]. Additionally, it was desired to
enhance the abstract dataflow models which the
simulator would support. These objectives, combined
with the desired debugging and metering requirements,
directed the design of the overall system. A brief
introduction to dataflow and its related terminology is
given to assist the reader. A companion paper [6]
describes an augmentation to the basic simulation
facility presented here that allows for the execution
of dataflow programs on processors having finite
resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Langan:1979:SED,
author = "David D. Langan and Bruce D. Shriver",
title = "Simulated execution of dataflow programs on processors
having finite resources",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "141--149",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805455",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dataflow languages and processors are currently being
extensively studied because they provide for the
specification and realization of processes exhibiting a
high degree of parallel and/or asynchronous activity
[12, 8]. Several researchers have developed simulators
for specific candidate dataflow architectures in which
there are essentially an infinite number of resources
available to the host machine [9, 1]. This is done to
study the degree of parallelism which is achievable
with a given version of an algorithm. However, it is an
equally important (and neglected) area to study the
behavior of programs executing in candidate computer
systems having a finite amount of resources. This paper
presents results which have been obtained from such
modeling. It is shown that in such a system certain
``critical nodes'' must be given priority of execution
when competing with other nodes for the same resources
in order to achieve the maximum system throughput. It
is suggested that the abstract dataflow model be
modified to accommodate such situations. Various design
trade-offs associated with the implementation of the
simulator are discussed along with a description of
available features. A companion paper [6] describes the
general dataflow simulation facility which provided the
basis of this work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Unger:1979:OSI,
author = "Brian W. Unger and James R. Parker",
title = "An operating system implementation and simulation
language {(OASIS)}",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "151--161",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805456",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An approach to the implementation and simulation of
system software for multicomputer architectures is
described. OASIS, a variant of the SIMULA 67 language,
provides tools for both hardware modelling and system
software development. The latter includes an extensible
module type with flexible intermodule access control.
Hardware is characterized at the processor/memory level
so that system software resource control and allocation
policies can be implemented at a functional level.
Concurrent module execution by multiple processors,
with or without shared memory, can be simulated
directly. The OASIS modules in such a simulation can
closely parallel the structure of actual system
software. Thus, once a design is shown viable by
simulation, the implementation of actual software can
be a simple translation of OASIS modules. A brief
overview of OASIS features is presented followed by a
simple example.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sanguinetti:1979:TIS,
author = "John Sanguinetti",
title = "A technique for integrating simulation and system
design",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "163--172",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805457",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A technique for simulating incomplete systems is given
which allows performance prediction during system
design. This technique, called integrated simulation,
allows the system design to itself be a simulation
model, thus avoiding the overhead of maintaining a
separate, valid simulation model for the system. The
paper presents integrated simulation in the framework
of a system modeling language called the Program
Process Modeling Language, PPML. This language provides
a means for describing systems of concurrent processes
in both abstract and explicit terms, thus lending
itself well to a top-down design method. In the design
process, any PPML representation of the system can be
simulated directly, from the most abstract design to
the completely elaborated system. Simulation of the
completely elaborated system is, in fact, simply the
system in execution. The paper defines PPML and
describes the techniques required to simulate PPML
systems given various underlying machines. It concludes
with a discussion of the limitations of the integrated
simulation method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Razouk:1979:EMS,
author = "Rami R. Razouk and Mary Vernon and Gerald Estrin",
title = "Evaluation methods in {SARA} --- the graph model
simulator",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "189--206",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805458",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The supported methodology evolving in the SARA (System
ARchitects' Apprentice) system creates a design
framework on which increasingly powerful analytical
tools are to be grafted. Control flow analyses and
program verification tools have shown promise. However,
in the realm of the complex systems which interest us
there is a great deal of research and development to be
done before we can count on the use of such powerful
tools. We must always be prepared to resort to
experiments for evaluation of proposed designs. This
paper describes a fundamental SARA tool, the graph
model simulator. During top-down refinement of a
design, the simulator is used to test consistency
between the levels of abstraction. During composition,
known building blocks are linked together and the
composite graph model is tested relative to the lowest
top-down model. Design of test environments is
integrated with the multilevel design process. The SARA
methodology is exemplified through design of a higher
level building block to do a simple FFT.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:1979:MSD,
author = "Stone H. Yu and Tadao Murata",
title = "Modeling and simulating data flow computations at
machine language level",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "207--213",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805459",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is concerned with the data flow
organization of computers and programs, which exhibits
a good deal of inherent concurrencies in a computation
by imposing no superfluous precedence constraints. In
view of the popularity of parallel and distributed
processing, this organization can be expected to play
an increasingly prominent role in the design and
development of computer systems. A schematic diagram
called DF-graphs, suitable for modeling data flow
computations at the machine language level, is
introduced. To facilitate the storage of DF-graphs in
computers, matrix equations which fully describe their
structure and their dynamic behaviors are developed as
an alternate representation. Also demonstrated is the
feasibility of simulating the execution of computations
specified by DF-graphs on a network of conventional
mini- and microprocessors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mattheyses:1979:MSA,
author = "R. M. Mattheyses and S. E. Conry",
title = "Models for specification and analysis of parallel
computing systems",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "215--224",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805460",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of designing a properly functioning
parallel hardware or software system is considerably
more difficult than that of designing a similar
sequential system. In this paper we formulate criteria
which a design methodology for parallel systems should
satisfy and explore the use of various models as the
basis for such a design tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gertner:1979:PEC,
author = "Ilya Gertner",
title = "Performance evaluation of communicating processes",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "241--248",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805461",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper concerns the performance evaluation of an
operating system based on communicating processes.
Processes communicate via messages and there is no
shared data. Execution of a program is abstracted as a
sequence of events to denote significant computational
steps. A finite state machine model of computation is
used for the specifications of abstract computational
properties and, thereafter, for the selective analysis
of measurement data. A set of conventions is developed
to characterize the performance of communicating
processes. A hierarchical layering technique is used to
concisely describe the characteristics of large
systems. A performance monitoring system was
implemented and applied to the analysis of RIG, a
message-based operating system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spooner:1979:BIS,
author = "Christopher R. Spooner",
title = "Benchmarking interactive systems: {Producing} the
software",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "249--257",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805462",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The author has recently developed a new methodology of
benchmarking, which is being applied to a procurement
in which (a) a single integrated interactive
application is to span a distributed configuration of
computing hardware, (b) the configuration is unknown
when the benchmark is being developed, and (c) the
application software will be written after the
benchmark has been run. The buyer prepares a simulation
model of the intended application in the form of
programs that will run on the hardware being
benchmarked. Each competing vendor is expected to tune
the performance of this model to the hardware
configuration that he has proposed, so he will require
several versions of the model. This presents the buyer
with a formidable software-production problem, which is
further complicated by a requirement for extreme
flexibility and reliability. The paper addresses the
software-production problem and describes its solution.
The solution was to develop an automated
code-production system based on two principal design
features. First, the model and its translator are both
written in the same language; secondly, the common
language is selected on the basis of readability and
extensibility. The paper examines why this approach to
the code-production problem was successful. Though the
code-production system was developed to support a
particular benchmarking approach, it should also be
useful in other modeling situations. Indeed it might be
of interest in any field where readability,
reliability, ease of maintenance, and economy of
programming effort are considered important.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dujmovic:1979:CCP,
author = "Jozo J. Dujmovi{\'c}",
title = "Criteria for computer performance analysis",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "259--267",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805463",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer evaluation, comparison, and selection is
essentially a decision process. The decision making is
based on a number of worth indicators, including
various computer performance indicators. The
performance indicators are obtained through the
computer performance measurement procedure.
Consequently, this procedure should be completely
conditioned by the decision process. This paper
investigates various aspects of computer performance
measurement and evaluation procedure within the context
of computer evaluation, comparison and selection
process based on the Logic Scoring of Preference
method. A set of elementary criteria for performance
evaluation is proposed and the corresponding set of
performance indicators is defined. The necessary
performance measurements are based on the standardized
set of synthetic benchmark programs and include three
separate measurements: monoprogramming performance
measurement, multiprogramming performance measurement,
and multiprogramming efficiency measurement. Using the
proposed elementary criteria, the measured performance
indicators can be transformed into elementary
preferences and aggregated with other non-performance
elementary preferences obtained through the evaluation
process. The applicability of the presented elementary
criteria is illustrated by numerical examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
xxauthor = "Jozo J. Dujomovi{\'c}",
}
@Article{Dyal:1979:SBS,
author = "James O. Dyal and William {DeWald, Jr.}",
title = "Small business system performance analysis",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "269--275",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805464",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents results from the performance
simulation study of a small business-oriented computer
system. The system, SPERRY UNIVAC BC/7-700, is
commercially available in the configuration modeled and
in other higher performance models. All BC/7 systems
modeled are supported with highly interactive
applications software systems. The model is
parameterized to select one or more workstations and
one or more cartridge disks. File allocations are by
cylinder. Seek times are computed by remembering the
position of each movable arm. References are randomized
within each file, but the sequence in which files are
accessed is controlled by the application logic, in
conjunction with the number of line items/order. Most
event times are not constant, but the result of drawing
randomly against empirical distributions with specified
mean and standard deviation. For this study, the system
simulated is composed of a single work-station running
the highly interactive on-line version of a
sophisticated order entry application package.
Principal performance measures are system throughput
and response time, including operator action times. It
is found that, in the single workstation environment,
performance is very cost effective in this highly
competitive part of the information system market.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huff:1979:SCR,
author = "Robert W. Huff",
title = "System characterization of a {Retail Business
System}",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "277--284",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805465",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexities of Retail Business Systems today
require a thorough understanding of how functional
requirements impact desired system performance. It is
no longer feasible to discretely test and evaluate
individual system components without considering their
inter-relationship. The techniques described in this
presentation will define the method of system
characterization of products prior to customer
delivery. Three techniques are utilized to characterize
system performance --- simulation, stimulation, and
performance measurement. Simulation involves writing a
mathematical model which is enhanced from a product
feasibility model to a system configuration tool as a
result of stimulation and measurement activities.
Stimulation consists of using emulators to load the
system component under test as if the actual system is
inter-connected. The emulators are programmed to
produce a processing volume which can exceed the peak
benchmark of the potential user. Performance
measurement is accomplished during the stimulation
activity using hardware/software probes to monitor
specific system parameters. These monitors provide
vital information to determine total system capacity
and the expected system performance for a given
configuration. The information derived from system
characterization is invaluable in providing the
customer with a realistic expectation of system
capability to perform its present functions and in
projecting future growth potential.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stroebel:1979:FPA,
author = "Gary Stroebel",
title = "Field performance aids for {IBM GSD} systems",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "285--291",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805466",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A series of field performance aids have been developed
to assist IBM Systems Engineers evaluate the
performance of System/3, System/34, and System/38
configurations. Use of those aids is appropriate at
proposal time, for preinstallation design, for tuning,
and for upgrade studies. This paper overviews some of
the key features of these aids as they pertain to the
user interface, workload characterization, and
performance models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clark:1979:FAP,
author = "Jon D. Clark",
title = "A feature analysis of performance evaluation texts",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "9--11",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Dowdy:1979:SWT,
author = "Lawrence W. Dowdy",
title = "Synopsis of workshop on the theory and application of
analytical models to {ADP} system performance
prediction",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "13--17",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Petrella:1979:SWS,
author = "Arthur Petrella and Harold Farrey",
title = "Simulating working sets under {MVS}",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "24--36",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pierson:1979:PEM,
author = "Daniel L. Pierson",
title = "Performance evaluation of a minicomputer-based data
collection system",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "37--44",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discussed the problems encountered and
techniques used in conducting the performance
evaluation of a multi-processor on-line manpower data
collection system. The two main problems were: (1) a
total lack of available software tools, and (2) many
commonly used hardware monitor measures (e.g., CPU
busy, disk seek in progress) were either meaningless or
not available. The main technique used to circumvent
these problems was detailed analysis of one-word
resolution memory maps. Some additional data collection
techniques were (1) time-stamped channel measurements
used to derive some system component utilization
characteristics and (2) manual stopwatch timings used
to identify the system's terminal response times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Artis:1979:CPM,
author = "H. Pat Artis",
title = "Capacity planning for {MVS} computer systems",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "45--62",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The current status of an implementation of a
methodology relating load, capacity and service for IBM
MVS computer systems is presented. This methodology
encompasses systems whose workloads include batch, time
sharing and transaction processing. The implementation
includes workload classification, mix representation
and analysis, automatic benchmarking, and exhaust point
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajaraman:1979:PVM,
author = "M. K. Rajaraman",
title = "Performance of a virtual memory: some experimental
results",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "63--68",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041870",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports the results of simulation
experiment of a model of a virtual memory computer. The
model consists of three major subsystems: Program
Behavior, Memory Allocation and Secondary Storage. By
adapting existing models of these subsystems an overall
model for the computer operation is developed and its
performance is tested for various design alternatives.
The results are reported for different paging devices,
levels of multiprogramming, job mixes, memory
allocation scheme, page service scheduling and page
replacement rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denning:1980:WWS,
author = "Peter J. Denning",
title = "What's a working set?",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "6--10",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041873",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "I am writing about the paper by A. Petrella and H.
Farrey, of IBM, SIMULATING WORKING SETS UNDER MVS,
reprinted in the SIGMETRICS Newsletter, Issue (8, 4),
winter 1979-80. The paper is an amalgam of very good
modeling work and misinformation about the working set
concept. I will summarize the important contributions
and give a short essay about working sets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Petrella:1980:SWS,
author = "Arthur Petrella and Harold Farrey",
title = "Simulating working sets under {MVS}",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "11--23",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041874",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high
in curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spiegel:1980:MEP,
author = "Mitchell G. Spiegel",
title = "Measuring and evaluating performance",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "33--34",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041875",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The subject of system performance measurement and
evaluation has undergone as many generations of changes
as the systems themselves. The problem of what to
measure and evaluate is complicated by the fact that
computing and communications, having become technically
similar (digital), will undergo further fusion. Because
the technologies are merging, a comparison of their
respective origins is instructive. Communications and
computing do not share a common history. Communications
performance evaluation began as a turn-of-the-century
issue. Important performance attributes of voice
communications systems were accessibility and
reliability. The general public and communications
system analysts always viewed the voice communications
systems as a bundled service, with little emphasis on
the characteristics of its individual components.
Performance was `engineered' into communications
systems for given workload capacity levels (traffic). A
reliable service offering evolved over two decades
(1920's and 1930's) and was expanded to include data as
well as voice communications. The voice network used
primarily analog transmission techniques, because voice
traffic grew far more rapidly than data. Pulse code
modulation (PCM) techniques, employing digital
transmission, reversed the trend of analog circuitry.
In the future, communications transmission, switching,
and integrated services networks (voice, data,
facsimile, picture) will be implemented exclusively
with digital techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dixon:1980:PMI,
author = "P. J. Dixon",
title = "Planning {MIS} investment and expense levels",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "35--37",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041876",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Asking for capital for data processing and
telecommunications equipment is not exactly popular
with most Boards of Directors in most companies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moran:1980:CPV,
author = "Thomas S. Moran",
title = "Capacity planning: `the volume'",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "38--40",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041877",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Some comments on past, present, and future measures of
volume as it affects planning for computer systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DeMarco:1980:BLB,
author = "Tom DeMarco",
title = "Breaking the language barrier",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "41--45",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041878",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The systems analyst and the user are not independent
entities; each depends on the other. When communication
problems get in their way, however, the relationship
can turn adversary. The real problem in most system or
program development efforts may be that English, left
to itself, is too subtle, too open to personal
interpretation, to be appropriate in the structured
world of DP. Tom DeMarco shows how to impose limits on
our native language so analysts, designers, programmers
and users can safely use it to define what they are
trying to develop. This week he starts by giving some
hints on that most basic of DP jobs, setting up the
system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Giles:1980:CSM,
author = "Howard L. Giles",
title = "Communications systems management",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "46--51",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041879",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As data processing systems have grown from primarily
batch-oriented applications to today's fairly extensive
on-line systems, the management system required to
control these resources has changed. This system
evolution is forcing management to focus their
attention on controlling the distribution of
information to various users performing many diverse
applications. Communications Systems Management is the
process used to manage and control the distribution of
information in an on-line system for maximum
performance and productivity. It consists of those
techniques and tools needed to operate, maintain,
repair, install and plan for the continuous operation
of a communications-oriented information system. The
following pages describe the management functions
needed to ensure that on-line system operation will be
successful.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Erlandson:1980:SEM,
author = "Robert F. Erlandson",
title = "System evaluation methodologies: combined
multidimensional scaling and ordering techniques",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "52--58",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041880",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is a difficult task to evaluate existing
large-scale systems; it is even more difficult to
evaluate alternative designs for future systems. Yet,
such decisions are necessary because of the long
development and implementation times involved.
Decisions must be made today about future systems for
telecommunications, power, health-care delivery,
transportation, etc. These systems change slowly
because additions or modifications are costly and must
mesh with the existing elements; hence, great care must
be given to the establishment of long-term goals and
the evaluation of alternative future system designs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pearson:1980:MCU,
author = "Sammy W. Pearson and James E. Bailey",
title = "Measurement of computer user satisfaction",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "59--68",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041881",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents the development and evaluation of
a questionnaire designed to quantitatively measure
computer user satisfaction. The administration,
scoring, and interpretation of the questionnaire are
also addressed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chandy:1980:CAP,
author = "K. Mani Chandy and Charles H. Sauer",
title = "Computational algorithms for product form queueing
networks",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "1--1",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806144",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the last two decades there has been special
interest in queueing networks with a product form
solution. These have been widely used as models of
computer systems and communication networks. Two new
computational algorithms for product form networks are
presented. A comprehensive treatment of these
algorithms and the two important existing algorithms,
convolution and mean value analysis, is given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Performance evaluation; Product form; Queueing
networks",
}
@Article{Gordon:1980:ICP,
author = "Karen D. Gordon and Lawrence W. Dowdy",
title = "The impact of certain parameter estimation errors in
queueing network models",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "3--9",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806145",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The effect that parameter estimation errors have on
performance in closed product form queueing networks is
investigated. In particular, the effects of errors in
the relative utilization estimates of the servers are
analyzed. It is shown that in single class load
independent networks, the resulting errors in
throughput and utilizations are of approximately the
same percentage as the errors in the relative
utilization estimates. This result does not hold in
networks with load dependent servers or multiple
customer classes. The percentage errors in mean queue
length depend upon the degree of multiprogramming in
the network. Errors in mean queue lengths can become
unbounded as the degree of multiprogramming becomes
unbounded. Implications of these results to computer
system modeling are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blake:1980:XIM,
author = "Russ Blake",
title = "{XRAY}: {Instrumentation} for multiple computers",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "11--25",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "XRAY presents a global view of the performance of
hardware and software components on multiple,
distributed computers. The set of components chosen for
measurement can be changed at any time throughout a
network of systems, and can be selected to minimize
data collection time and measurement space. In the
course of normal activities the operating system
executes firmware which increments counters for the
measured components. Periodically, the counters are
recorded in an ordinary file by a process in each
processor. An analysis program permits browsing through
components and plotting counters in real time. Analysis
focuses on detecting the distributed sources of
excessive activity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hughes:1980:DDA,
author = "James H. Hughes",
title = "{DIAMOND} a digital analyzer and monitoring device",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "27--34",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806147",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the design and application of a
special purpose computer system. It was developed as an
internal tool by a computer manufacturer, and has been
used in solving a variety of measurement problems
encountered in computer performance evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bashioum:1980:BIS,
author = "Douglas L. Bashioum",
title = "Benchmarking interactive systems: {Calibrating} the
model",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "35--41",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A methodology for benchmarking dedicated, interactive
systems has been developed at The MITRE Corporation.
This methodology uses a synthetic program model of the
application which runs on the proposed
hardware/operating system configurations and is driven
by a statistically derived load. System performance is
measured by analyzing the synthetic transaction
response times. The methodology yields assurances to a
buyer that the benchmarked system has at least an a
priori defined amount of computer power available for
applications-oriented software. This paper examines the
methodology and the problems that were encountered and
solutions which have been used in calibrating a
benchmark model for a specific application. The
benchmark was designed to model a large interactive
information processing application on a procurement
requiring loosely-coupled (no shared memory)
multicomputer systems. The model consists of a set of
interacting synthetic program cells, each composed of
several abstractly defined components. The model is
maintained in a very high level language that is
automatically translated into a standard High Order
Language (typically FORTRAN or COBOL) for delivery to
the competing vendors. These delivered model cells
contain automatically generated size and time filler
code that ``calibrates'' the cells to consume the
appropriate CPU time and memory space as defined by the
abstract size units after accounting for each vendor's
hardware and proposed system design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Benchmark; Calibration; Computer performance
measurement; Distributed processing; Interactive
systems; Modeling; Real-time; Simulation; Synthetic
programs",
}
@Article{Lehmann:1980:PEP,
author = "Axel Lehmann",
title = "Performance evaluation and prediction of storage
hierarchies",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "43--54",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806149",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes a modelling methodology combining
simulation and analysis for computer performance
evaluation and prediction. The methodology is based on
a special workload model that is suitable for the
generation and description of dynamic program
behaviour. A description of this workload model is
given in section 2. The applicability of this concept
with respect to the design of new storage systems, as
well as the improvement or comparison of existing
systems, will be described by investigation of the
efficiency of small cache memories in section 3.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alanko:1980:MER,
author = "Timo O. Alanko and Ilkka J. Haikala and Petri H.
Kutvonen",
title = "Methodology and empirical results of program behaviour
measurements",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "55--66",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806150",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Program behaviour characteristics were examined using
data gathered from real program executions. Experiments
were performed in a segmented virtual memory with a
working set policy; the analysis costs were kept low
by using an efficient data reduction method. Empirical
results were obtained concerning the influence of the
window size on program behaviour characteristics, the
accuracy of some average working set size
approximations and the sensitivity of program behaviour
to the program's input data. These results show that
some commonly used assumptions concerning program
behaviour are inaccurate. Also there seem to exist
``ill-behaving'' programs, the behaviour of which does
not correspond well with results obtained earlier. The
effects of real-time delays during program execution
were considered using a new simple method. As an
additional experiment, segmenting and paging were
compared using various performance statistics; the
results seem to favour segmenting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kumar:1980:PRB,
author = "Gopa Kumar and C. Thomas Nute",
title = "Program restructuring for block structured languages",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "67--79",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806151",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Prior studies of program restructuring to increase the
degree of locality of a program in a paged virtual
memory system were restricted to statically allocated
codes only. This work develops a restructuring
methodology for block structured languages like Algol,
with dynamic memory allocation. We subsequently
restructure and analyze different classes of programs
using this methodology and study the performance gains
realized with different restructuring heuristics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vantilborgh:1980:NCD,
author = "Hendrik T. Vantilborgh and Richard L. Garner and
Edward D. Lazowska",
title = "Near-complete decomposability of queueing networks
with clusters of strongly interacting servers",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "81--92",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806152",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The near-complete decomposability of queueing network
models of computer systems is generally supported by
very large differences in the service rates of the
servers. In this paper we show how such models may
still be nearly completely decomposable if on the one
hand these large differences can no longer be
realistically assumed (as is the case, for example, in
computer networks) but if on the other hand clusters of
strongly interacting servers exist. Our results may be
viewed as a bridge between the approaches to the
approximate analysis of queueing networks advanced by
Courtois and by Chandy, Herzog and Woo, since we show
circumstances under which the former approach leads to
exactly the same method of analysis as the latter. In
contrast to the Chandy, Herzog and Woo theorem,
however, the theory of near-complete decomposability
does not rely on the beneficent properties of queueing
networks exhibiting product form solutions. Thus our
results may point the way towards the theoretically
sound application of simple and intuitively appealing
approximate analysis techniques to non-product-form
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandwajn:1980:FRE,
author = "Alexandre Brandwajn",
title = "Further results on equivalence and decomposition in
queueing network models",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "93--104",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806153",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses three aspects related to the
notion of exact equivalence in queueing models. In many
cases the parameters of a system equivalent to a given
model involve only a small subset of conditional
probabilities of the state of the original model given
the equivalent one. It is shown that meaningful bounds
may be obtained for the conditional probabilities of
interest with little computational effort. Such bounds
are useful in assessing processing capacities as well
as the accuracy of approximate solutions. As a second
point it is shown that the notion of exact equivalence
may be easily extended to networks with non-exponential
servers. This is done both for the method of
supplementary variables and for the embedded Markov
chain technique. Qualitative analysis of approximation
methods is also discussed. Finally, numerical methods
based on the notion of exact equivalence, i.e.
operating on conditional probabilities, are
considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stewart:1980:ECF,
author = "William J. Stewart and Gerald A. Zeiszler",
title = "On the existence of composite flow equivalent
{Markovian} servers",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "105--116",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806154",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing networks have been used to model a large
variety of complex systems. However, once a realistic
model has been constructed it has generally been
necessary to distort and modify it so that an analytic
solution could be obtained. Unfortunately, the analytic
solution often has little relation to the original
queueing system and consequently often produces
solutions with poor accuracy. We begin with a brief
introduction to the concepts of decomposition and
aggregation. Application of these and other approximate
methods to the analysis of computer systems are
discussed by Chandy and Sauer [CHAN78].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marie:1980:CEP,
author = "Raymond Marie",
title = "Calculating equilibrium probabilities for {$ \lambda
(n) / C_k / 1 / N $} queues",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "117--125",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806155",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Equilibrium state distributions are determined for
queues with load-dependent Poisson arrivals and service
time distributions representable by Cox's generalized
method of stages. The solution is obtained by
identifying a birth-death process that has the same
equilibrium state distribution as the original queue.
Special cases of two-stage (C2) and Erlang-k (Ek)
service processes permit particularly efficient
algorithms for calculating the load-dependent service
rates of the birth-death process corresponding to the
original queue. Knowing the parameters of the
birth-death process, the equilibrium state
probabilities can be calculated straightforwardly.
This technique is particularly useful when subsystems
are reduced to flow-equivalent servers representing the
complementary network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wagner:1980:HCS,
author = "Robert A. Wagner and Kishor S. Trivedi",
title = "Hardware configuration selection through discretizing
a continuous variable solution",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "127--142",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806156",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper extends a previous model for computer
system configuration planning developed by the authors.
The problem is to optimally select the CPU speed, the
device capacities, and file assignments so as to
maximize throughput subject to a fixed cost constraint.
We advocate solving this essentially discrete problem
in continuous variables followed by an appropriate
discretization. The discretization error thus committed
is analyzed in detail.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bard:1980:MSD,
author = "Yonathan Bard",
title = "A model of shared {DASD} and multipathing",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "143--143",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806157",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a model of an I/O subsystem in
which devices can be accessed from multiple CPUs and/or
via alternative channel and control unit paths. The
model estimates access response times, given access
rates for all CPU-device combinations. The systems
treated are those having the IBM System/370
architecture, with each path consisting of a CPU,
channel, control unit, head of string, and device with
rotational position sensing. The path selected for an
access at seek initiation time remains in effect for
the entire channel program. The computation proceeds in
three stages: First, the feasibility of the prescribed
access rates is determined by solving a linear
programming problem. Second, the splitting of access
rates among the available paths is determined so as to
satisfy the following principle: The probability of
selecting a given path is proportional to the
probability that the path is free. This condition leads
to a set of nonlinear equations, which can be solved by
means of the Newton--Raphson method. Third, the RPS hit
probability, i.e. the probability that the path is free
when the device is ready to transmit, is computed in
the following manner: From the point of view of the
selected path, the system may be viewed as being in one
of 25 possible states. There are twelve different
subsets of states whose aggregate probabilities can be
computed from the (by now) known flow rates over the
various paths. The maximum entropy principle is used to
calculate the unknown state probabilities, with the
known aggregate probabilities acting as constraints.
The required RPS hit probability can be computed easily
once the state probabilities have been determined.
Explicit formulas are given for all these quantities.
Empirically derived formulas are used to compute the
RPS miss probability on subsequent revolutions, given
the probability on the first revolution. The model is
validated against a simulator, showing excellent
agreement for systems with path utilizations up to 50
percent. The model is also validated against
measurements from a real three-CPU system with 31
shared devices. In this validation, the I/O subsystem
model acts as a common submodel to three copies of a
system model, one for each CPU. Estimated end-user
transaction response times show excellent agreement
with the live measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lo:1980:CCP,
author = "T. L. Lo",
title = "Computer capacity planning using queueing network
models",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "145--152",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806158",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents several computer capacity planning
case studies using a modeling tool, BEST/1, derived
from the theory of queueing networks. All performance
predictions were evaluated based on the selected
service levels such as response times and throughputs.
Advantages and disadvantages of using the modeling
approach are also briefly discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kurinckx:1980:OVC,
author = "A. Kurinckx and G. Pujolle",
title = "Overallocation in a virtual circuit computer network",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "153--158",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806159",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study the end-to-end control through
virtual circuits in a computer network built following
the X.25 Recommendations. We develop a mathematical
model to obtain the maximum overallocation of node
buffers, in order for the probability of overflow not
to exceed a given value.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Upton:1980:ADA,
author = "Richard A. Upton and Satish K. Tripathi",
title = "Analysis of design alternatives for a packet switched
{I/O} system",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "159--171",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806160",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an application of analytical
modeling to the design and evaluation of a general
purpose, packet-switched image processing system that
will soon enter an implementation phase. A bottom-up
modeling approach is used to evaluate such design
issues as optimal packet size, optimal channel access
method(s), and required number of processors and disks.
Based on the characteristics of various hardware
components and the predicted workload, specific design
recommendations are made.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balkovich:1980:PDS,
author = "Edward E. Balkovich and Colin Whitby-Strevens",
title = "On the performance of decentralized software",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "173--180",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806161",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distribution of computing to achieve goals such as
enhanced reliability depends on the use of decentralized
software. Decentralization typically replaces a
sequential process by a system of small, concurrent
processes that interact frequently. The implementation
of processes and their interactions represents a cost
incurred as a result of decentralization. Performance
measurements are reported in this paper for
decentralized software written in a programming
language for distributed computer systems. These
performance measurements confirm that low-cost
implementations of concurrency are possible, but
indicate that decentralized software makes heavy use of
run-time functions managing concurrency. An initial
model comparing the performance of a specific
decentralized software structure to its centralized
counterpart indicates that these implementation costs
are generally offset by the performance improvements
that are due to the parallelism inherent in the
decentralized structure. The research facilities for
continued study of decentralized software performance
are described in the summary.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Concurrent software; Decentralized control;
Decentralized software; Distributed computer systems;
Performance measurement and evaluation",
}
@Article{Grit:1980:PMA,
author = "Dale H. Grit and Rex L. Page",
title = "Performance of a multiprocessor for applicative
programs",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "181--189",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806162",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Applicative programming languages provide
opportunities for parallel processing without requiring
the programmer to be concerned with explicit
synchronization of portions of the computation. We
present a computational model of a multiprocessor which
executes applicative programs, and we analyze the
expected performance of the model via simulation. As
the number of processors is doubled, elapsed execution
time is nearly halved, until system bottlenecks occur.
An alternative model is proposed which alleviates these
bottlenecks. The basis of the second model is an
interconnection switch which is characterized by $ \log
(n) $ access time and $ n \log (n) $ cost.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dhas:1980:PEF,
author = "C. Retna Dhas",
title = "Performance evaluation of a feedback data flow
processor using simulation",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "191--197",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806163",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a method to estimate the
performance of a feedback data flow processor using
software simulation. A brief overview of a data flow
language and a data flow processor along with the
conceptual view of a software simulator are described.
Numerical measurements of parallelism and resource
requirements are obtained by translating high level
language programs to data flow language and then
executing them on the simulator.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bryant:1980:HMG,
author = "Raymond M. Bryant",
title = "On homogeneity in {M\slash G\slash 1} queueing
systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "199--208",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806164",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Operational analysis replaces certain classical
queueing theory assumptions with the condition of
``homogeneous service times.'' In this paper, we show
that the sample paths of an M/G/1 queueing system have
this property with non-zero probability if and only if
the service time distribution is exponential. We also
consider the relationship of the operational
performance measures S(n) and the mean service time.
This relationship is shown to depend on the form of the
service distribution. It follows that using operational
analysis to predict the performance of an M/G/1
queueing system when the mean service time is changed
will be most successful when the service time
distribution is exponential. Simulation evidence is
presented which supports this claim.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
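
The homogeneity property that Bryant:1980:HMG examines can be probed empirically: S(n), the observed mean service time per completion while n jobs are present, should not vary with n. The sketch below is not from the paper; it simulates an M/G/1 sample path with an illustrative arrival rate and compares exponential with deterministic service.

import random

def estimate_S(service_sampler, lam=0.7, completions_wanted=200000, seed=1):
    """Estimate S(n): busy time observed at queue length n divided by the
    number of completions that occurred at that length (an operational,
    sample-path quantity)."""
    random.seed(seed)
    t = 0.0
    next_arrival = random.expovariate(lam)
    next_departure = float("inf")
    n = 0                                   # jobs currently in the system
    busy = {}                               # time spent with exactly n jobs present
    done = {}                               # completions seen at level n
    served = 0
    while served < completions_wanted:
        t_next = min(next_arrival, next_departure)
        if n > 0:
            busy[n] = busy.get(n, 0.0) + (t_next - t)
        t = t_next
        if next_arrival <= next_departure:  # arrival event
            n += 1
            if n == 1:
                next_departure = t + service_sampler()
            next_arrival = t + random.expovariate(lam)
        else:                               # departure event
            done[n] = done.get(n, 0) + 1
            n -= 1
            served += 1
            next_departure = t + service_sampler() if n > 0 else float("inf")
    return {k: busy[k] / done[k] for k in sorted(done) if k in busy}

for name, sampler in [("exponential", lambda: random.expovariate(1.0)),
                      ("deterministic", lambda: 1.0)]:
    S = estimate_S(sampler)
    print(name, {k: round(S[k], 3) for k in sorted(S)[:5]})
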
@Article{Coffman:1980:ORP,
author = "E. G. {Coffman, Jr.} and Erol Gelenbe and Roger C.
Wood",
title = "Optimal replication of parallel-read, sequential-write
systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "209--216",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806165",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Frequently used computer elements that can be written
by at most one process at a time constitute important
bottlenecks in multiprocessor system operation,
particularly when such elements are accessible only
serially. Hardware devices, data files, system tables
and critical sections in general may be examples of
such elements. One common way to relieve this
congestion is to provide several copies of the element,
which can then be read (used) in parallel. However, the
requirement that writing (changing) remain sequential
means that writing times increase with the number of
copies provided. The optimization question in this
trade-off is the main concern of this paper. A
probability model of such a system is formulated with
the objective of obtaining read-rate capacities as a
function of read/write loads and the number of copies
provided. The above optimization problem is expressed
in terms of these results and then solved. In
particular, it is shown how to select the number of
copies that maximizes the read-rate capacity for given
system parameters. Two distinct operating regimes,
based on how interrupted read operations are restarted,
are analyzed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shore:1980:LRO,
author = "John E. Shore",
title = "The lazy repairman and other models: {Performance}
collapse due to overhead in simple, single-server
queuing systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "217--224",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806166",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider two simple models of overhead in batch
computer systems and demand access communications
systems. The first, termed ``modified M/M/1/K,'' is an
exponential, single-server queuing system with finite
storage capacity, constant arrival rate, and
queue-length-dependent service time. We consider cases
in which the expected service time consists of a
constant plus a term that grows linearly or
logarithmically with the queue length. We show that the
performance of this system --- as characterized by the
expected number of customers in the system, the
expected time in the system, and the rate of missed
customers --- can collapse as the result of small
changes in the arrival rate, the overhead rate, or the
queue capacity. The system has the interesting property
that increasing the queue capacity can decrease
performance. In addition to equilibrium results, we
consider the dynamic behavior of the model. We show
that the system tends to operate in either of two
quasi-stable modes of operation --- one with low queue
lengths and one with high queue lengths. System
behavior is characterized by long periods of operation
in both modes with abrupt transitions between them. We
point out that the performance of a saturated system
may be improved by dynamic operating procedures that
return the system to the low mode. In the second model,
termed the ``lazy repairman,'' the single server has
two distinct states: the ``busy'' state and the
``lazy'' state. Customers receive service only when the
server is in the busy state; overhead is modeled by
attributing time spent in the lazy state to overhead
functions. When the expected time spent in the lazy
state increases with the number of customers waiting
for service, the behavior of the lazy repairman model
is similar to the modified M/M/1/K, although the lazy
repairman model makes it easier to study in detail the
effects of overhead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
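
The ``modified M/M/1/K'' model of Shore:1980:LRO is a birth-and-death chain, so its equilibrium distribution can be computed directly. The sketch below is only illustrative: it assumes the linear-overhead form of the expected service time and made-up parameter values, and reports the expected number in system, the expected time in system, and the rate of missed customers as the arrival rate is nudged upward.

def mm1k_with_overhead(lam, K, base, overhead):
    """Steady state of a single-server queue with Poisson arrivals (rate lam),
    room for K jobs, and expected service time base + overhead*n when n jobs
    are present (the linear-overhead case).  Birth-death balance gives
    p(n) proportional to the product over i = 1..n of lam * s(i)."""
    weights = [1.0]
    for n in range(1, K + 1):
        weights.append(weights[-1] * lam * (base + overhead * n))
    total = sum(weights)
    p = [w / total for w in weights]
    mean_jobs = sum(n * p[n] for n in range(K + 1))
    missed = lam * p[K]                      # arrivals lost when the buffer is full
    throughput = lam - missed
    mean_time = mean_jobs / throughput       # Little's law on accepted customers
    return round(mean_jobs, 2), round(mean_time, 2), round(missed, 4)

for lam in (0.80, 0.90, 1.00):               # small changes in the arrival rate
    print(lam, mm1k_with_overhead(lam, K=50, base=1.0, overhead=0.02))
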
@Article{Lam:1980:RTD,
author = "Simon S. Lam and A. Udaya Shankar",
title = "Response time distributions for a multi-class queue
with feedback",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "225--234",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806167",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A single server queue with feedback and multiple
customer classes is analyzed. Arrival processes are
independent Poisson processes. Each round of service is
exponentially distributed. After receiving a round of
service, a customer may depart or rejoin the end of the
queue for more service. The number of rounds of service
required by a customer is a random variable with a
general distribution. Our main contribution is
characterization of response time distributions for the
customer classes. Our results generalize in some
respects previous analyses of processor-sharing models.
They also represent initial efforts to understand
response time behavior along paths with loops in
locally balanced queueing networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:1980:AIO,
author = "Y. T. Wang",
title = "Analysis of an intrinsic overload control for a class
of queueing systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "235--243",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806168",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a priority queueing system which consists
of two queues sharing a processor and in which there is
delayed feedback. Such a model arises from systems
which employ a priority assignment scheme to achieve
overload control. An analytic expression for the
stationary probability of the queue lengths is derived.
An algorithm is proposed to compute the queue lengths
distribution. Some numerical results are illustrated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smith:1980:ASD,
author = "Connie Smith and J. C. Browne",
title = "Aspects of software design analysis: {Concurrency} and
blocking",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "245--253",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806169",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper extends previous work on development of a
methodology for the prediction of the performance of
computer software systems from design level
specifications and continuing through implementation.
The effects of synchronized behavior, such as results
from data reservation in multi-thread executions of
data base systems, and competition for host system
resources are incorporated. The previous methodology
uses hierarchical graphs to represent the execution of
software on some host computer system (or on some
abstract machine). Performance metrics such as response
time were obtained from analysis of these graphs
assuming execution of a single copy on a dedicated
host. This paper discusses the mapping of these
execution graphs upon queueing network models of the
host computing environment to yield performance metric
estimates for more complex and realistic processing
environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Potier:1980:ALP,
author = "D. Potier and Ph. Leblanc",
title = "Analysis of locking policies in database management
systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "255--255",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806170",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Quantitative analysis of locking mechanisms and of
their impact on the performance of transactional
systems has so far received relatively little attention.
Although numerous concurrency mechanisms have been
proposed and implemented, there is an obvious lack of
experimental as well as analytical studies of their
behaviour and their influence on system performance. We
present in this paper an analytical framework for the
performance analysis of locking mechanisms in
transactional systems based on hierarchical analytical
modelling. Three levels of modelling are considered: at
level 1, the different stages (lock request, execution,
blocking) transactions go through during their
life-time are described; the organization and
operations of the CPU and I/O resources are analysed at
level 2; transactions' behaviour during their lock
request phase is analysed at modelling level 3. This
hierarchical approach is applied to the analysis of a
physical locking scheme involving a static lock
acquisition policy. A simple probabilistic model of the
transaction behaviour is used to derive the
probability that a new transaction is granted the locks
it requests given the number of transactions already
active as a function of the granularity of the
database. On the other hand, the multiprogramming
effect due to the sharing of CPU and I/O resources by
transactions is analysed using the standard queueing
network approaches and the solution package QNAP. In a
final step, the results on the blocking probabilities
and the multiprogramming effect are used as input of a
global performance model of the transactional system.
Markovian analysis is used to solve this model and to
obtain the throughput of the system as a function of
the data base granularity and other parameters. The
results obtained provide a clear understanding of the
various factors which determine the global performance,
of their role and importance. They also raise many new
issues which can only be solved by further extensive
experimental and analytical studies and show that two
particular topics deserve special attention: the
modelling of transaction behaviour and the modelling of
locking overheads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
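
The lock-grant probability that Potier:1980:ALP derives depends on the paper's own transaction model; the sketch below works out the same kind of quantity under simplifying assumptions of ours (each active transaction holds k granules, the holdings are disjoint, and the new transaction picks its k granules uniformly at random), purely to show how the granularity D enters.

from math import comb

def grant_probability(D, k, m):
    """Probability that a new transaction is granted all k granules it requests
    when m active transactions each hold k granules, assumed disjoint, out of a
    database partitioned into D granules (static lock acquisition)."""
    locked = m * k
    if locked + k > D:
        return 0.0
    return comb(D - locked, k) / comb(D, k)

# Coarser granularity (fewer, larger granules) makes blocking far more likely.
for D in (50, 500, 5000):
    print(D, [round(grant_probability(D, k=4, m=m), 4) for m in range(6)])
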
@Article{Coffman:1980:ONC,
author = "E. G. {Coffman, Jr.} and E. Gelenbe and B. Plateau",
title = "Optimization of the number of copies in a distribution
data base",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "257--263",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806171",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the effect on system performance of the
distribution of a data base in the form of multiple
copies at distinct sites. The purpose of our analysis
is to determine the gain in READ throughput that can be
obtained in the presence of consistency preserving
algorithms that have to be implemented when UPDATE
operations are carried out on each copy. We show that
READ throughput diminishes if the number of copies
exceeds an optimal value. The theoretical model we
develop is applied to a system in which consistency is
preserved through the use of Ellis's ring algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ruschitzka:1980:RJC,
author = "Manfred Ruschitzka",
title = "The response of job classes with distinct policy
functions (Extended Abstract)",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "265--265",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806172",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Policy function schedulers provide a flexible
framework for implementing a wide range of different
scheduling schemes. In such schedulers, the priority of
a job at any instant in time is defined by the
difference between the time it spent in the system and
an arbitrary function of its attained service time. The
latter is called the policy function and acts as the
functional parameter that specifies a particular
scheduling scheme. For instance, a constant policy
function specifies the first-come, first-serve
scheduling scheme. By changing the policy function, the
system behavior can be adjusted to better conform with
desired response characteristics. It is common to
express response characteristics in terms of a response
function, the average response time of a job
conditioned on its service requirement in equilibrium.
In this paper, we analyze processor-sharing M/G/1
systems in which the priorities of different classes of
jobs are determined by distinct policy functions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
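
The priority rule described in Ruschitzka:1980:RJC is easy to state in code: a job's priority is its time in the system minus a policy function of its attained service time. The policy functions and job parameters below are illustrative, not the paper's; they only show how a non-constant policy function reorders jobs relative to first-come, first-served.

def priority(now, arrival_time, attained_service, policy):
    """Priority at time `now`: time spent in the system minus the policy
    function evaluated at the job's attained service time."""
    return (now - arrival_time) - policy(attained_service)

fcfs    = lambda s: 0.0        # constant policy function -> first-come, first-served
fb_like = lambda s: 10.0 * s   # penalizes attained service -> favors short jobs

now = 100.0
jobs = [("long-running", 20.0, 30.0),   # (label, arrival time, attained service)
        ("newcomer",     60.0,  5.0)]
for label, arrived, attained in jobs:
    print(label,
          round(priority(now, arrived, attained, fcfs), 1),
          round(priority(now, arrived, attained, fb_like), 1))
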
@Article{Kim:1980:PTO,
author = "K. H. Kim and Mahmoud Naghibzadeh",
title = "Prevention of task overruns in real-time
non-preemptive multiprogramming systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "267--276",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806173",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Real-time multiprogramming systems, in which a
hardware processor is dynamically assigned to run
multiple software processes each designed to control an
important device (user), are considered. Each software
process executes a task in response to a service
request repeatedly coming from the corresponding user.
Each service task is associated with a strict deadline,
and thus the design problem that we are concerned with
is to ensure that the service tasks requested can
always be executed within the associated deadlines,
i.e., no task overrun occurs. This problem was studied
by several investigators for the cases where preemptive
scheduling strategies are used. In contrast, very few
studies have been conducted for cases of non-preemptive
scheduling. In this paper we show that a non-preemptive
strategy, called relative urgency non-preemptive (RUNP)
strategy, is optimal in the sense that if a system runs
without a task overrun under any non-preemptive
strategy, it will also run without a task overrun under
the RUNP strategy. Then an efficient procedure used at
the design time for detecting the possibility of a task
overrun in a system using the RUNP strategy is
presented. The procedure is useful in designing
overrun-free real-time multiprogramming systems that
yield high processor utilizations. Some special types
of systems using the RUNP strategy for which even
simpler detection procedures are available are also
discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Non-preemptive scheduling; Real-time multiprogramming;
Relative urgency; Task overrun; Time critical process",
}
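
A relative urgency non-preemptive (RUNP) dispatcher, as described in Kim:1980:PTO, can be sketched in a few lines: whenever the processor becomes free, the pending request with the nearest deadline is run to completion. The request set below is illustrative, and the overrun flag is only a per-run observation, not the paper's design-time detection procedure.

import heapq

def runp(requests):
    """Non-preemptive dispatch by relative urgency.
    requests: list of (release_time, service_time, absolute_deadline)."""
    pending = sorted(requests)              # ordered by release time
    ready, log = [], []
    t, i, overrun = 0.0, 0, False
    while i < len(pending) or ready:
        while i < len(pending) and pending[i][0] <= t:   # admit released requests
            rel, srv, dl = pending[i]
            heapq.heappush(ready, (dl, rel, srv))
            i += 1
        if not ready:                        # processor idles until the next release
            t = pending[i][0]
            continue
        dl, rel, srv = heapq.heappop(ready)  # most urgent pending request
        t += srv                             # runs to completion, never preempted
        log.append((rel, t, dl))
        overrun = overrun or t > dl
    return log, overrun

print(runp([(0, 2, 5), (1, 3, 10), (2, 1, 4)]))
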
@Article{King:1980:NMI,
author = "P. J. B. King and I. Mitrani",
title = "Numerical methods for infinite {Markov} processes",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "277--282",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806174",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The estimation of steady state probability
distributions of discrete Markov processes with
infinite state spaces by numerical methods is
investigated. The aim is to find a method applicable to
a wide class of problems with a minimum of prior
analysis. A general method of numbering discrete states
in infinite domains is developed and used to map the
discrete state spaces of Markov processes into the
positive integers, for the purpose of applying standard
numerical techniques. A method based on a little-used
theoretical result is proposed and is compared with two
other algorithms previously used for finite state space
Markov processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
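
One standard way to number the states of an infinite two-dimensional state space, in the spirit of King:1980:NMI, is Cantor's diagonal enumeration; the particular scheme below is our choice for illustration, not necessarily the paper's. Truncating the numbering at some N yields a finite chain to which the usual numerical methods apply.

def number_of(state):
    """Map a two-dimensional state (i, j), with i, j >= 0, to a positive
    integer by enumerating the diagonals i + j = 0, 1, 2, ..."""
    i, j = state
    d = i + j
    return d * (d + 1) // 2 + j + 1

def state_of(n):
    """Inverse of number_of."""
    n -= 1
    d = 0
    while (d + 1) * (d + 2) // 2 <= n:
        d += 1
    j = n - d * (d + 1) // 2
    return (d - j, j)

# The first states in enumeration order; truncating the numbering at some N
# gives a finite chain whose steady-state vector approximates the infinite one.
print([state_of(n) for n in range(1, 11)])
print(number_of((3, 2)))
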
@Article{Fayolle:1980:SCT,
author = "G. Fayolle and P. J. B. King and I. Mitrani",
title = "The solution of certain two-dimensional {Markov}
models",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "283--289",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806175",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A class of two-dimensional Birth-and-Death processes,
with applications in many modelling problems, is
defined and analysed in the steady-state. These are
processes whose instantaneous transition rates are
state-dependent in a restricted way. Generating
functions for the steady-state distribution are
obtained by solving a functional equation in two
variables. That solution method lends itself readily to
numerical implementation. Some aspects of the numerical
solution are discussed, using a particular model as an
example.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clark:1980:EIE,
author = "Jon D. Clark and Robert M. Golladay",
title = "Empirical investigation of the effectiveness of
several computer performance evaluation tools",
journal = j-SIGMETRICS,
volume = "9",
number = "3",
pages = "31--36",
month = "Fall",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041883.1041884",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of tools exist for computer selection
evaluation. The operational cost of applying these varies
considerably as does the precision of the performance
prediction. This paper compares the precision of
several commonly used methods in a single test case,
namely cycle time, instruction mix analysis and
benchmarking.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmark; computer; cycle time; instruction mix;
performance evaluation",
}
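
Of the three methods compared in Clark:1980:EIE, instruction mix analysis is the easiest to illustrate: a machine's average instruction time is the frequency-weighted mean of its per-class timings. The mix and the timings below are invented purely for illustration.

mix = {"load/store": 0.35, "fixed add": 0.25, "branch": 0.20,
       "float add": 0.12, "float multiply": 0.08}          # workload frequencies
times_us = {"machine A": {"load/store": 1.0, "fixed add": 0.6, "branch": 0.8,
                          "float add": 1.8, "float multiply": 2.6},
            "machine B": {"load/store": 1.2, "fixed add": 0.5, "branch": 0.6,
                          "float add": 1.5, "float multiply": 2.0}}

for machine, t in times_us.items():
    avg = sum(mix[c] * t[c] for c in mix)                  # microseconds per instruction
    print(machine, round(avg, 3), "us/instr ->", round(1.0 / avg, 3), "MIPS")
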
@Article{Estell:1980:BW,
author = "Robert G. Estell",
title = "Benchmarks and watermarks",
journal = j-SIGMETRICS,
volume = "9",
number = "3",
pages = "39--44",
month = "Fall",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041883.1041885",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Historically, benchmark tests have been one of several
ways to size a computer system, and measure its
performance. Today, it is more important to test the
System Software than the machine hardware. (Thus the
term `watermark' (as on bond paper) for software
tests.) Watermarks of software suffer the same
limitations and risks as benchmarks of hardware: e.g.,
they should be supplemented with simulations, models,
and other analysis and design tools of our trade.
Perhaps most significantly, watermarks, like
benchmarks, can be biased by their creators.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kleijnen:1980:SMM,
author = "J. P. C. Kleijnen",
title = "Scoring methods, multiple criteria, and utility
analysis",
journal = j-SIGMETRICS,
volume = "9",
number = "3",
pages = "45--56",
month = "Fall",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041883.1041886",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scoring methods are popular in computer selection, and
try to combine different attributes into an overall
performance measure. Related is the multi-criteria
evaluation of computerized information systems. The
scoring method is criticized in the context of more
general utility models, popular in economics. Scoring
provides simplistic choice models, and should not be
used as predictive, causal models. Many references for
further study are included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denning:1980:TTI,
author = "Peter J. Denning",
title = "A tale of two islands: a fable",
journal = j-SIGMETRICS,
volume = "9",
number = "4",
pages = "7--10",
month = "Winter",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041888.1041889",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Once upon a time there were two islands. One was
called Stochasia. Its citizens were well cultured and
they had achieved high development in a system of
mathematics for random processes. The other island was
called Operatia. Its citizens were well cultured and
they had achieved high development in a system for
experimentation with nondeterminate phenomena. Both
civilizations were closed societies. Neither knew of
the other's existence, and it had been so since the
beginning of time. Neither would ever have known, had
it not been for the events I will describe shortly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yader:1980:ACP,
author = "Mark J. Yader",
title = "{ADP} capacity planning: a case study",
journal = j-SIGMETRICS,
volume = "9",
number = "4",
pages = "11--25",
month = "Winter",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041888.1041890",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A case study of short-range ADP capacity planning is
presented and related to the process of long-range
planning. Short-range capacity planning is concerned
with identification of computer and communication
resources which will reach saturation levels in the
near future. The initial step in the short-range
planning process is to evaluate the performance of the
user's current system configuration and one or more
configuration enhancements with respect to their
effectiveness in supporting a projected workload.
Central to long-range planning is the evaluation of a
broader range of architectural alternatives, including
various distributed processing designs. In both short
range and long range planning, system modeling is a
basic tool for evaluating alternatives. An analytic
network of queues model has been developed to reflect
both centralized and hierarchically distributed network
architectures. The application of the tool as part of
the short-range case study is described.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marrevee:1980:HFF,
author = "J. Marrev{\'e}e",
title = "How friendly and fast is {FAST DUMP RESTORE}",
journal = j-SIGMETRICS,
volume = "9",
number = "4",
pages = "28--35",
month = "Winter",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041888.1041891",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "FAST DUMP RESTORE, shortly named FDR, is a very well
known software package, delivered by the software house
Innovation Data Processing, and in some countries of
Europe commercially supported by Westinghouse. This
package is used in many computer centres using one of
IBM's big operating systems e.g. MVT or MVS. According
to Innovation's own remarks it became one of the most
successful software products in the world with about
3000 users, and since 1974 it is every year on the
DATAPRO HONOR ROLL. It should, among others, provide
superior performance on creation of dumps or restores
of disk packs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bernard:1980:SUM,
author = "J. C. Bernard",
title = "{T-scan}: the use of micro computers for response time
measurements",
journal = j-SIGMETRICS,
volume = "9",
number = "4",
pages = "39--50",
month = "Winter",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041888.1041892",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "All large computer centers are actually faced with a
major change in their workload. Most applications are
leaving batch operations for time sharing ease of use.
In fact, all kinds of computer work could be performed
through a console: development, maintenance, data base
query and update and even batch control and submit. A
second problem arises as end-user profile is no more
computer oriented. Users only look at the time the
system needs to answer their requests, and don't care
about the computer game. So performance analysts and
operations managers are supposed to achieve a certain
level of service which they are almost unable to
measure. We try in this paper to discuss some major
problems related to conversational computer operations.
We will present several drawbacks characterising the
currently existing solutions. A problem that lead us to
define simple operating principle for response time
measurements. This principle is implemented in a fully
automatic measurement tool named T-SC",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bell:1981:SLC,
author = "Thomas E. Bell",
title = "Structured life-cycle assumptions",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "1--3",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807901",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "New programmers, some managers, and lots of users
don't understand the advantages of a structured
software life-cycle. However, only a single experience
with coding while designing will convince any incipient
software engineer that a controlled process is needed
from the time of system concept through the last
maintenance phase. Software Configuration Management
has become almost a religion, and EDP auditors have
even encountered a few systems that appear to have been
specified, then designed, then implemented, then
tested, and finally installed --- all before
maintenance and redefinition occurred. Perhaps the
millennium has finally arrived, and software people
will soon live in a controlled world with rational
practices. If you are tempted to believe the foregoing
prediction, read the latest issue of FORTUNE, the WALL
STREET JOURNAL, or COMMERCE BUSINESS DAILY and note a
few problems that may divert us from the path to
Nirvana. Data Processing supports commercial,
educational, industrial, and governmental activities
that are frequently (and repeatedly) redirected. Under
circumstances of a largely random environment with
thorough business planning a rarity, a critical support
activity can expect to be redirected frequently. New
ideas will be sliced into partly-completed DP
projects, and users' ``analytical analyses'' will become
DP systems as if by magic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coughlin:1981:SDM,
author = "Donald T. Coughlin",
title = "System development methodology or system research
methodology?",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "5--6",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A future data processing historian may someday point
to the 1970s as the decade when business application
systems began their adolescent growth period. We
entered the 1970s with few truly on-line business
systems, and many application designers did not fully
appreciate the capabilities and limitations of index
sequential file structures. Many of the larger
companies were busy writing their own tp monitors and
file handling systems, and it is very possible that
more professional hours were being devoted to the
development of control program software than to
applications software. The last decade did provide the
application programmer with new control program tools
such as data base management systems and on-line
terminal control software. It also generated a
continuing demand for computer performance software
specialists to tune application systems immediately
after initial implementation. These performance tuning
efforts often required substantial changes to the
application system --- not just program code but also
basic redesign. Therefore were these really system
development projects or were they system research
projects?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Earle:1981:AAB,
author = "Dennis M. Earle",
title = "An alchemical approach to brokerage",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "7--8",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The essence of the commodities business is the ability
to react quickly to evolving market conditions.
Mocatta, a N.Y. based bullion dealer, is a firm which
uses its Data Processing to provide both front office
(trading) flexibility and back-office capacity to
handle large volume days. The business is characterized
by the constant trade-off of time against money.
Corporate philosophy is to spend money to react quickly
rather than to react slowly but perhaps at lower costs.
The life cycle of a system in this environment normally
begins with a marketing report reflecting a new market
niche which the firm can take advantage of. Data
Processing is involved almost from the inception of the
idea to provide an indication of what existing systems
can do for this new opportunity. Because of the nature
of the business, each new product offered is usually so
unique as to make it impossible for existing systems to
support a new product from a trading point of view.
Back-office applications are somewhat more common
across products, so existing systems can usually
provide some support. The key point is that all we
really know is that we want to market the new product.
Some idea of the time frame in which the product is to
be offered is also obtained. The exact workings of
defining the product and determining the parameters
under which it will be traded usually remain to be
worked out prior to the offering date. This therefore
means that we have, at the point of commitment, the
necessity for evolving data processing support in the
same time frame in which the definition is evolving
about what it is that we are to support.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spiegel:1981:PAI,
author = "Mitchell G. Spiegel",
title = "Prototyping: an approach to information and
communication system design",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "9--19",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes prototyping, a state-of-the-art
methodology to assist a design team in making a thorough
definition and analysis of new requirements,
feasibility, alternative selections, workload impact,
system and/or application specification,
implementation, and testing. Suggested prototype tools
and techniques are presented, and guidance is included
to aid a design team in obtaining accurate and timely
results. This paper is not intended to be a complete
text on design. It should be enhanced with a design
team's expertise, consultation from sources with design
experience, and reference to other design literature.
Prototyping is a process (the act, study, or skill) of
modeling an information-communication system
architecture in one or more levels of detail, using
descriptive models, abstract models, and working models
of the system and its component parts (synonym:
archetyping). This work was completed while the author
was working with prior employers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jenkins:1981:APC,
author = "C. Wesley Jenkins",
title = "Application prototyping: a case study",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "21--27",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807905",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Accurate specification of user requirements for
interactive systems is especially difficult in an
environment where the demand for information is
intense, short-fused and largely unpredictable. The
Congressional Budget Office was created in 1975 by an
Act of Congress. Its primary mandate is to serve the
Budget and Appropriation committees of both the Senate
and the House of Representatives. The Act also defined
a Congressional Budget process specifying a calendar of
events and specific completion dates for major
activities. This pacing of budgetary actions produces
a highly charged environment in which CBO must be able
to respond immediately to information needs with
information that is both accurate and consistent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cox:1981:SRT,
author = "Patricia R. Cox",
title = "Specification of a regression test for a mini computer
operating system",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "29--32",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper I describe the practical problems of
designing a regression test set for an existing
mini-computer operating system. The ideal regression
test would test each function with all possible
combinations of the options for each variation of the
operating system. This is impractical if not impossible
so the alternative is to choose the individual cases
for maximum coverage. To do that the system is viewed
both functionally and structurally and cases are
selected for inclusion in the test set. The method of
selecting the tests is described along with the tools
that will be needed to measure the coverage and to
maintain the test set.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bryan:1981:ASC,
author = "William Bryan and Stanley Siegel and Gary
Whiteleather",
title = "An approach to software configuration control",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "33--47",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to discuss the process by
which a system's life cycle and its associated life
cycle products are managed to ensure the quality and
integrity of the system. We call this process
configuration control. Although many of the ideas in
this paper are applicable to systems in general, the
focus of this paper is on configuration control of
systems with software content. It is becoming apparent
to many, in both government and private industry, that
the high cost of maintenance of existing computer
systems may be attributed to poor configuration control
early in the system's life cycle. For example, in an
article entitled ``A Corporate Road Map for Systems
Development in the '80s,'' the following claim appears.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fredrick:1981:PIS,
author = "C. R. Fredrick",
title = "Project implementation of {Software Configuration
Management}",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "49--56",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807908",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Have you or one of your programmers said: ``The system
ran yesterday; I only changed one line.'' or ``I spent
my budget, but I'm not done.'' or ``I fixed that
problem yesterday, but it's back now.'' or ``I thought
it would be a nice feature for the operator, so I added
it to the program.'' or ``Why was this line of code
changed? Who did it and when?''? If these or other
similar statements are familiar, then Software
Configuration Management is a subject that should
interest you. Software Configuration Management (SCM)
is a management method that establishes a discipline
for the software development process and provides
visibility to that process. The step by step procedures
used by a large software organization to resolve some
of their development problems will be followed here.
The result of their efforts was the formulation of a
management method that significantly improved the
quality of their software products and reduced the
costs. It was learned later that other software
organizations had gone through similar processes and
arrived at similar results. This new tool is now known
as Software Configuration Management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berlack:1981:ISC,
author = "H. Ronald Berlack",
title = "Implementing software configuration control in the
structured programming environment",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "57--77",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807909",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The fundamental problems in the control of software
are explored. The elements of control as they relate to
communications are defined, and the implementation of
these elements in solving the fundamental problems and
achieving optimal control during a software development
life cycle is explained. Control is defined as a
vehicle for communicating changes to established,
agreed-upon baseline points, made up of documents and
subsequent computer programs. By communicating change
to those involved or affected, and obtaining agreement
of the change, one achieves a degree of control that
does not inhibit software engineering innovation or
progress, but helps maintain the project's prime
objectives to deliver maintainable, error-free software
to the ultimate user.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gross:1981:PCV,
author = "Peter Gross",
title = "Producers and consumers views of software quality
(Panel Session)",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "79--79",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "At this very ACM workshop/symposium indicates,
software quality is of great concern to both producers
and users of software. It should be obvious to those
who have attended the earlier sessions today and to
those who will attend the sessions tomorrow that
quality is something that cannot be tested into a
system or added to a system. It must be integral from
the start of the definition of the system's
requirements through each phase of analysis, design,
implementation, integration, testing, and installation.
Software quality implies an engineering type approach
to the development of software. It implies the use of a
disciplined development environment, and the use of
tools and techniques to provide assurances throughout
the software development process that both the software
and its baseline specifications are complete,
consistent, and traceable from one to another.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Henry:1981:RAT,
author = "Sallie Henry and Dennis Kafura and Kathy Harris",
title = "On the relationships among three software metrics",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "81--88",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Automatable metrics of software quality appear to have
numerous advantages in the design, construction and
maintenance of software systems. While numerous such
metrics have been defined, and several of them have
been validated on actual systems, significant work
remains to be done to establish the relationships among
these metrics. This paper reports the results of
correlation studies made among three complexity metrics
which were applied to the same software system. The
three complexity metrics used were Halstead's effort,
McCabe's cyclomatic complexity and Henry and Kafura's
information flow complexity. The common software system
was the UNIX operating system. The primary result of
this study is that Halstead's and McCabe's metrics are
highly correlated while the information flow metric
appears to be an independent measure of complexity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
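
Two of the three metrics correlated in Henry:1981:RAT can be computed from simple counts. The sketch below uses a toy, hand-tokenized fragment (the tokenization and the decision-token set are our simplifications; real tools parse the source language) to show Halstead's effort E = D * V and McCabe's cyclomatic number.

import math

operators = ["if", ">", "=", "+", "while", "<", "=", "+", "return"]   # operator tokens
operands  = ["x", "y", "z", "x", "y", "i", "n", "i", "i", "z"]        # operand tokens

def halstead_effort(ops, opnds):
    n1, n2 = len(set(ops)), len(set(opnds))       # distinct operators / operands
    N1, N2 = len(ops), len(opnds)                 # total occurrences
    volume = (N1 + N2) * math.log2(n1 + n2)       # Halstead volume V
    difficulty = (n1 / 2) * (N2 / n2)             # Halstead difficulty D
    return difficulty * volume                    # effort E = D * V

def cyclomatic(decision_tokens, ops):
    # McCabe: one plus the number of binary decision points in the flow graph
    return 1 + sum(ops.count(tok) for tok in decision_tokens)

print("Halstead effort:", round(halstead_effort(operators, operands), 1))
print("McCabe cyclomatic complexity:", cyclomatic({"if", "while"}, operators))
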
@Article{Szulewski:1981:MSS,
author = "Paul A. Szulewski and Mark H. Whitworth and Philip
Buchan and J. Barton DeWolf",
title = "The measurement of software science parameters in
software designs",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "89--94",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Metrics of software quality have historically focused
on code quality despite the importance of early and
continuous quality evaluation in a software development
effort. While software science metrics have been used
to measure the psychological complexity of computer
programs as well as other quality related aspects of
algorithm construction, techniques to measure software
design quality have not been adequately addressed. In
this paper, software design quality is emphasized. A
general formalism for expressing software designs is
presented, and a technique for identifying and counting
software science parameters in design media is
proposed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Basili:1981:ECS,
author = "Victor R. Basili and Tsai-Yun Phillips",
title = "Evaluating and comparing software metrics in the
software engineering laboratory",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "95--106",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has appeared in the literature a great number of
metrics that attempt to measure the effort or
complexity in developing and understanding software.
There have also been several attempts to independently
validate these measures on data from different
organizations gathered by different people. These
metrics have many purposes. They can be used to
evaluate the software development process or the
software product. They can be used to estimate the cost
and quality of the product. They can also be used
during development and evolution of the software to
monitor the stability and quality of the product. Among
the most popular metrics have been the software science
metrics of Halstead, and the cyclomatic complexity
metric of McCabe. One question is whether these metrics
actually measure such things as effort and complexity.
One measure of effort may be the time required to
produce a product. One measure of complexity might be
the number of errors made during the development of a
product. A second question is how these metrics compare
with standard size measures, such as the number of
source lines or the number of executable statements,
i.e., do they do a better job of predicting the effort
or the number of errors? Lastly, how do these metrics
relate to each other?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ronback:1981:TMS,
author = "James Ronback",
title = "Test metrics for software quality",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "107--107",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses Bell Northern Research's
experience in utilizing an extended set of test metrics
for assuring the quality of software. The theory and
use of branch and path class coverage is discussed and
the reaction of users is described. This paper also
discusses the effect of using co-resident inspection
procedures in achieving cost-effective testing for a
high degree of test coverage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Benson:1981:AST,
author = "J. P. Benson",
title = "Adaptive search techniques applied to software
testing",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "109--116",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An experiment was performed in which executable
assertions were used in conjunction with search
techniques in order to test a computer program
automatically. The program chosen for the experiment
computes a position on an orbit from the description of
the orbit and the desired point. Errors were inserted
in the program randomly using an error generation
method based on published data defining common error
types. Assertions were written for the program and it was
tested using two different techniques. The first
divided up the range of the input variables and
selected test cases from within the sub-ranges. In this
way a ``grid'' of test values was constructed over the
program's input space. The second used a search
algorithm from optimization theory. This entailed using
the assertions to define an error function and then
maximizing its value. The program was then tested by
varying all of them. The results indicate that this
search testing technique was as effective as the grid
testing technique in locating errors and was more
efficient. In addition, the search testing technique
located critical input values which helped in writing
correct assertions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
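
The core idea in Benson:1981:AST -- turn an executable assertion into a numeric error function and search the input space for a maximizer -- can be sketched with a toy program and a simple two-phase search. Everything below (the faulty routine, the assertion, the search parameters) is illustrative and not the paper's orbit-computation program.

import random

def program_under_test(x):
    # toy routine with a seeded fault: intended to return abs(x), but a narrow
    # band of negative inputs falls through untreated
    return -x if x < -0.5 else x

def error_function(x):
    # executable assertion "the result is never negative", recast as a numeric
    # error: zero when the assertion holds, positive by the amount of violation
    return max(0.0, -program_under_test(x))

def search_for_failure(lo=-10.0, hi=10.0, samples=500, refine=500, seed=0):
    random.seed(seed)
    # global phase: uniform random sampling of the input space
    best = max((random.uniform(lo, hi) for _ in range(samples)), key=error_function)
    best_err = error_function(best)
    # local phase: hill-climb around the best sample to sharpen the violation
    for _ in range(refine):
        cand = min(hi, max(lo, best + random.gauss(0.0, 0.1)))
        if error_function(cand) > best_err:
            best, best_err = cand, error_function(cand)
    return round(best, 4), round(best_err, 4)

print(search_for_failure())    # a positive error value exposes the seeded fault
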
@Article{Paige:1981:DST,
author = "Michael Paige",
title = "Data space testing",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "117--127",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807916",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A complete software testing process must concentrate
on examination of the software characteristics as they
may impact reliability. Software testing has largely
been concerned with structural tests, that is, test of
program logic flow. In this paper, a companion software
test technique for the program data called data space
testing is described. An approach to data space
analysis is introduced with an associated notation. The
concept is to identify the sensitivity of the software
to a change in a specific data item. The collective
information on the sensitivity of the program to all
data items is used as a basis for test selection and
generation of input values.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goel:1981:OTP,
author = "Amrit L. Goel",
title = "Optimal testing policies for software systems",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "129--130",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807918",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An important problem of practical concern is to
determine how much testing should be done before a
system is considered ready for release. This decision,
of course, depends on the model for the software
failure phenomenon and the criterion used for
evaluating system readiness. In this paper, we first
develop a cost model based on the time dependent
failure rate function of Goel and Okumoto. Next, we
derive policies that yield the optimal values of the
level of test effort (b*) and software release time
(T*). The sensitivity of the optimal solution is also
numerically evaluated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
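
The Goel-Okumoto mean value function is m(t) = a(1 - exp(-bt)); the cost model below is a textbook-style stand-in (per-fault fix costs plus a per-unit-time testing cost, all with made-up values), not necessarily the exact model of Goel:1981:OTP, and a coarse grid search stands in for the paper's derivation of the optimal release time T*.

import math

a, b = 100.0, 0.05                        # expected total faults, detection rate (illustrative)
c_test, c_field, c_time = 1.0, 5.0, 0.5   # per-fault fix costs and testing cost per unit time

def m(t):
    """Goel-Okumoto mean value function: expected failures observed by time t."""
    return a * (1.0 - math.exp(-b * t))

def cost(T):
    """Total cost of releasing at time T: faults fixed during test, the dearer
    faults left for the field, and the cost of the testing time itself."""
    return c_test * m(T) + c_field * (a - m(T)) + c_time * T

T_star = min((0.1 * k for k in range(5001)), key=cost)    # coarse grid search on [0, 500]
print("release at T* =", round(T_star, 1), "with cost", round(cost(T_star), 1))
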
@Article{Littlewood:1981:BDD,
author = "B. Littlewood",
title = "A {Bayesian} differential debugging model for software
reliability",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "129--130",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807919",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An assumption commonly made in early models of
software reliability is that the failure rate of a
program is a constant multiple of the number of bugs
remaining. This implies that all bugs have the same
effect upon the overall failure rate. The assumption is
challenged and an alternative proposed. The suggested
model results in earlier bug-fixes having a greater
effect than later ones (the worst bugs show themselves
earlier and so are fixed earlier), and the DFR property
between bug-fixes (confidence in programs increases
during periods of failure-free operation, as well as at
bug-fixes). The model shows a high degree of
mathematical tractability, and allows a range of
reliability measures to be calculated exactly.
Predictions of total execution time to achieve a target
reliability are obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Musa:1981:SRMa,
author = "J. D. Musa and A. Iannino",
title = "Software reliability modeling accounting for program
size variation due to integration or design changes",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "129--130",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807920",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Estimation of software reliability quantities has
traditionally been based on stable systems; i.e., systems
that are completely integrated and are not undergoing
design changes. Also, it is assumed that test results
are completely inspected for failures. This paper
describes a method for relaxing the foregoing
conditions by adjusting the lengths of the intervals
between failures experienced in tests as compensation.
The resulting set of failure intervals represents the
set that would have occurred for a stable system in its
final configuration with complete inspection. The
failure intervals are then processed as they would be
for a complete system. The approach is developed for
the execution time theory of software reliability, but
the concepts could be applied to many other models as
well. The benefits of the adjustment in improving the
estimation of quantities of interest to the software
manager are illustrated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Musa:1981:SRMb,
author = "John D. Musa",
title = "Software reliability measurement session",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "129--130",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many people think of reliability as a devoutly wished
for but seldom present attribute of a program. This
leads to the idea that one should make a program as
reliable as one possibly can. Unfortunately, in the
real world software reliability is usually achieved at
the expense of some other characteristic of the product
such as program size, run or response time,
maintainability, etc. or the process of producing the
product such as cost, resource requirements,
scheduling, etc. One wishes to make explicit trade-offs
among the software product and process rather than let
them happen by chance. Such trade-offs imply the need
for measurement. Because of mounting development and
operational costs, pressures for obtaining better ways
of measuring reliability have been mounting. This
session deals with this crucial area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goel:1981:WST,
author = "Amrit L. Goel and Kazuhira Okumoto",
title = "When to stop testing and start using software?",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "131--138",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807921",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During the last decade, numerous studies have been
undertaken to quantify the failure process of large
scale software systems. (see for example, references
1-12.) An important objective of these studies is to
predict software performance and use the information
for decision making. An important decision of practical
concern is the determination of the amount of time that
should be spent in testing. This decision of course
will depend on the model used for describing the
failure phenomenon and the criterion used for
determining system readiness. In this paper we present
a cost model based on the time dependent fault
detection rate model of Goel and Okumoto (4,5) and
describe a policy that yields the optimal value of test
time T.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
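
A commonly quoted form of the cost model behind the two Goel--Okumoto entries
above (the coefficients and criterion here are illustrative; the papers' exact
formulation may differ) takes the expected number of faults found by test time
$T$ to be $m(T) = a(1 - e^{-bT})$ and minimizes
$$ C(T) = c_1 m(T) + c_2 \,[a - m(T)] + c_3 T, $$
where $c_1$ is the cost of fixing a fault during testing, $c_2 > c_1$ the cost
of fixing it in operation, and $c_3$ the cost per unit of testing time. Setting
$dC/dT = 0$ gives the optimal release time
$$ T^* = \frac{1}{b} \ln \frac{(c_2 - c_1)\, a b}{c_3}, $$
provided $(c_2 - c_1) a b > c_3$; otherwise the cost is nondecreasing in $T$
and the optimal policy is to release immediately ($T^* = 0$).
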
@Article{Littlewood:1981:SRG,
author = "B. Littlewood",
title = "Stochastic reliability growth: a model with
applications to computer software faults and hardware
design faults",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "139--152",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807922",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An assumption commonly made in early models of
software reliability is that the failure rate of a
program is a constant multiple of the number of faults
remaining. This implies that all faults have the same
effect upon the overall failure rate. The assumption is
challenged and an alternative proposed. The suggested
model results in earlier fault-fixes having a greater
effect than later ones (the worst faults show
themselves earlier and so are fixed earlier), and the
DFR property between fault-fixes (confidence in
programs increases during periods of failure-free
operations, as well as at fault-fixes). The model shows
a high degree of mathematical tractability, and allows
a range of reliability measures to be calculated
exactly. Predictions of total execution time to achieve
a target reliability, and total number of fault-fixes
to target reliability, are obtained. It is suggested
that the model might also find applications in those
hardware reliability growth situations where design
errors are being eliminated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Decreasing failure rate; Design debugging; Design
errors; Pareto Distribution; Probability distribution
mixture; Programming debugging modelling; Reliability
growth; Software errors; Software failure rate;
Software faults; Software mttf; Software reliability",
}
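
One way to see the DFR property claimed in the Littlewood entries above is the
following sketch, assuming (consistently with the abstract, though the paper's
notation may differ) that each of the $N$ initial faults has an unknown rate
drawn from a Gamma$(\alpha, \beta)$ prior. A fault that has not shown itself by
elapsed time $t$ has posterior mean rate $\alpha / (\beta + t)$, so after
$i - 1$ fixes the program failure rate is roughly
$$ \lambda_i(t) = (N - i + 1)\, \frac{\alpha}{\beta + t}, $$
which decreases during failure-free operation (the DFR property) and drops at
each fix; since faults with the largest rates tend to surface first, earlier
fixes remove larger contributions to the failure rate than later ones.
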
@Article{Ottenstein:1981:SDS,
author = "Linda M. Ottenstein",
title = "Software defects --- a software science perspective",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "153--155",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807923",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper gives a model for computing the programming
time. The results of tests with programs in APL, BASIC,
and FORTRAN are also given and discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ottenstein:1981:PNE,
author = "Linda Ottenstein",
title = "Predicting numbers of errors using software science",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "157--167",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807924",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An earlier paper presented a model based on software
science metrics to give a quantitative estimate of the
number of bugs in a programming project at the time
validation of the project begins. In this paper, we
report the results from an attempt to expand the model
to estimate the total number of bugs to expect during
the total project development. This new hypothesis has
been tested using the data currently available in the
literature along with data from student projects. The
model fits the published data reasonably well; however,
the results obtained using the student data are not
conclusive.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schneider:1981:SEE,
author = "Victor Schneider",
title = "Some experimental estimators for developmental and
delivered errors in software development projects",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "169--172",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807925",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Experimental estimators are presented relating the
expected number of software problem reports (B) in a
software development project to the overall reported
professional effort (E) in ``man months'', the number of
subprograms (n), and the overall count of thousands of
coded source statements of software (S). [equation] These
estimators are shown to be consistent with data
obtained from the Air Force's Rome Air Development
Center, the Naval Research Laboratory, and Japan's
Fujitsu Corporation. Although the results are
promising, more data is needed to support the validity
of these estimators.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sneed:1981:SSA,
author = "H. Sneed",
title = "{SOFTDOC} --- {A} system for automated software static
analysis and documentation",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "173--177",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807926",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The code itself is useless without adequate
documentation. Besides that, it is almost impossible to
validate and verify code unless it is properly
documented. Yet most of the attention of the past has
been devoted to producing code and little to producing
the documentation, although it is obvious that it is
necessary both for testing and maintaining the software
product. Software documentation can be classified
according to its usage. Thus, there is a functional
documentation for describing what a system does and
what it is used for, and technical documentation for
describing how the software is constructed and how it
performs its functions. The former is directed toward
the user, the latter toward the tester and maintainer.
The two are, however, highly interrelated. Since the
programmer seldom writes the user documentation, it is
necessary for those who describe what the system does
to know how it does it. An accurate technical
documentation is a prerequisite for producing accurate
user documentation. Finally, it serves yet another
purpose. Without it, it is not possible to control the
quality of the software. Software Quality Control
presupposes a full and up-to-date technical description
in order to assess the characteristics of the system
such as modularity, portability, reliability, etc.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crowley:1981:ADP,
author = "John D. Crowley",
title = "The application development process: {What}'s wrong
with it?",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "179--187",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807927",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper will examine the process used in the
development of computer applications. The claim is made
that the current methodology has serious deficiencies,
but that a software development approach is becoming
available to help address these problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bailey:1981:SSU,
author = "C. T. Bailey and W. L. Dingee",
title = "A software study using {Halstead} metrics",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "189--197",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807928",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an application of Maurice
Halstead's software theory to a real time switching
system. The Halstead metrics and the software tool
developed for computing them are discussed. Analysis of
the metric data indicates that the level of the
switching language was not constant across algorithms
and that software error data was not a linear function
of volume.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
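
For readers unfamiliar with the metrics used in the Bailey--Dingee study above,
the classical software-science quantities can be computed from operator and
operand counts as in this small sketch (the counts, and the tokenizer that
would produce them, are hypothetical; the paper's own tool is not reproduced).

  # Halstead software-science metrics from operator/operand counts.
  import math

  def halstead(n1, n2, N1, N2):
      """n1, n2: distinct operators/operands; N1, N2: total occurrences."""
      n = n1 + n2                        # vocabulary
      N = N1 + N2                        # observed length
      volume = N * math.log2(n)          # V = N log2(n)
      level = (2.0 / n1) * (n2 / N2)     # estimated program level
      effort = volume / level            # E = V / L
      return {"vocabulary": n, "length": N, "volume": volume,
              "level": level, "effort": effort}

  print(halstead(n1=10, n2=15, N1=40, N2=35))
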
@Article{Esposito:1981:WCT,
author = "A. Esposito and A. Mazzeo and P. Costa",
title = "Workload characterization for trend analysis",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "5--15",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041800",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The methodology of analysis proposed in this paper
aims at predicting the workload of a computer. This
methodology consists of applying an algorithm of
clustering to the workload, its jobs being identified
by a pair $ (X, P) $, where $X$ is the resource-vector
of the job and $P$ stands for the priority given to the
job by the user. The clusters thus obtained are then
associated with the $ a_i$ activities developed in the
system and determine the influence of each $ a_i$ on
the overall workload. By repeating this operation at
different times, either the periodicity or the
monotonic changes that may occur in each activity are
determined. This makes it possible to predict the
evolution of the overall workload and consequently to
evaluate changes to be carried out in the system. The
above methodology is applied to a specific case and is
illustrated in its various phases. The results obtained
have validated the method. The study is still going on,
with continuous periodical observations in order to
update the data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Musa:1981:SRMc,
author = "J. D. Musa and A. Iannino",
title = "Software reliability modeling: accounting for program
size variation due to integration or design changes",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "16--25",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041801",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Estimation of software reliability quantities has
traditionally been based on stable programs; i.e.,
programs that are completely integrated and are not
undergoing design changes. Also, it is ordinarily
assumed that all code is being executed at one time or
another and that test or operational results are being
completely inspected for failures. This paper describes
a method for relaxing the foregoing conditions by
adjusting the lengths of the intervals between failures
experienced as compensation. The resulting set of
failure intervals represents the set that would have
occurred for a completely inspected program that was at
all times in its final configuration. The failure
intervals are then processed as they would be for a
stable program. The approach is developed for the
execution time theory of software reliability, but the
concepts could be applied to many other models as well.
Many definitions are given to describe program size
variation and associated phenomena. Attention is
focused on the special case of sequential integration
and pure growth. The adjustment method is described and
its benefits in improving the estimation of quantities
of interest to the software manager are illustrated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Comer:1981:CTD,
author = "J. R. Comer and J. R. Rinewalt and M. M. Tanik",
title = "A comparison of two different program complexity
measures",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "26--28",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041802",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, a number of program complexity
metrics have been developed to measure various
characteristics of computer programs [1, 3]. Included
among these metrics are Zolnowski's composite measure
of program complexity [4, 5] and McCabe's cyclomatic
measure of program complexity [2]. The present paper
examines these two metrics and attempts to measure
their correlation with a third metric assigned by the
program's author. This metric has been called the
psychological complexity or the intuitive complexity of
a program.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
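
Of the two metrics compared in the Comer--Rinewalt--Tanik note above, McCabe's
cyclomatic number has the simple closed form $V(G) = E - N + 2P$ for a
control-flow graph with $E$ edges, $N$ nodes, and $P$ connected components. A
minimal sketch follows; the example graph is invented for illustration.

  # Cyclomatic complexity V(G) = E - N + 2P of a control-flow graph.
  def cyclomatic_complexity(edges, nodes, components=1):
      return len(edges) - len(nodes) + 2 * components

  # toy graph for a routine containing a single if/else decision
  nodes = ["entry", "test", "then", "else", "exit"]
  edges = [("entry", "test"), ("test", "then"), ("test", "else"),
           ("then", "exit"), ("else", "exit")]
  print(cyclomatic_complexity(edges, nodes))   # 5 - 5 + 2 = 2
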
@Article{Abrams:1981:NNM,
author = "Marshall D. Abrams and Dorothy C. Neiman",
title = "{NBS} network measurement methodology applied to
synchronous communications",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "29--36",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041803",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper focuses on the application of the NBS
Network Measurement Instrument (NMI) to synchronous
data communication. The suitability of the underlying
Stimulus --- Acknowledgement --- Response (SAR) model
to support the implementation of this methodology
permitting quantitative evaluation of interactive
teleprocessing service delivered to the user is
described. The logic necessary to interpret SAR
components and boundaries depends on character time
sequence for asynchronous data communications traffic
but entails protocol decomposition and content analysis
for character synchronous data traffic. The
decomposition and analysis rules necessary to evaluate
synchronous communications are discussed and the level
of protocol violation detection which results as a
byproduct is cited. Extensions to the utility of the
Network Measurement Instrument (NMI), deriving from
additional workload profiling measures desirable for
character synchronous communications, are also
presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data communications; protocol validation; synchronous;
teleprocessing service evaluation",
}
@Article{Larsen:1981:CEL,
author = "R. L. Larsen and J. R. Agre and A. K. Agrawala",
title = "A comparative evaluation of local area communication
technology",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "37--47",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041804",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The builder of a local area network is immediately
confronted with the selection of a communications
architecture to interconnect the elements (hosts and
terminals) of the network. This choice must often be
made in the presence of great uncertainty regarding the
available alternatives and their capabilities, and a
dearth of comparative information. This was the
situation confronting NASA upon seriously considering
local area networks as an architecture for mission
support operations. As a result, a comparative study
was performed in which alternative communication
architectures were evaluated under similar operating
conditions and system configurations. Considered were:
(1) the ring, (2) the cable-bus, (3) a
circuit-switching system, and (4) a shared memory
system. The principal performance criterion used was
the mean time required to move a message from one host
processor to another host processor. Local operations
within each host, such as interrupt service time, were
considered to be part of this overall time. The
performance of each alternative was evaluated through
simulation models and is summarized in this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hughes:1981:HPT,
author = "Herman D. Hughes",
title = "A highly parameterized tool for studying performance
of computer systems",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "48--65",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A highly parameterized simulation model is described
which allows experiments to be performed for computer
performance evaluation studies. The results of these
experiments can be used to evaluate the effect of
changing the hardware configuration, the workload, the
scheduling policy, the multiprogramming level, etc. The
model is constructed to function either as a batch or
time-sharing system, or as a combination of both. This
simulation model also has the potential of providing
dynamic feedback for the scheduler. A discussion of the
design, implementation, and use of the model is
presented. Examples are provided to illustrate some
possible uses of the model and verifications of the
results obtained from the model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cumulative distribution function; events; hardware
configuration; model validation; queue; scheduling
policies; simulation model; system performance;
workloads",
}
@Article{Spiegel:1981:RPP,
author = "Mitchell G. Spiegel",
title = "{RTE}'s: past is prologue",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "66--73",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041806",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper surveys the evolution of Remote Terminal
Emulators (RTEs). Major developments in RTE technology
are separated into three `generations' of products.
Each generation's unique applications and features are
highlighted. Recent developments are noted and a
prediction of future use for RTEs is provided.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Browne:1981:DSP,
author = "J. C. Browne",
title = "Designing systems for performance",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "1--1",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805467",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Real-time systems and systems to interface human work
environments will dominate the growth of computer
applications over the next decade. These systems must
execute their functions with the timeliness and
responsiveness required in these environments. The
design, development and testing of such systems must
guarantee performance as well as functionality and
reliability. There is not yet in place a technology to
support this requirement for engineering of
performance. The research and development community in
performance has focused primarily on analysis and
deduction rather than the performance arena. This talk
will define and discuss the tasks of engineering
performance into software systems and describe the
recent progress towards this goal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reiner:1981:MAP,
author = "David Reiner and Tad Pinkerton",
title = "A method for adaptive performance improvement of
operating systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "2--10",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805468",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a method for dynamic modification
of operating system control parameters to improve
system performance. Improved parameter settings are
learned by experimenting on the system. The experiments
compare the performance of alternative parameter
settings in each region of a partitioned
load-performance space associated with the system. The
results are used to modify important control parameters
periodically, responding to fluctuations in system load
and performance. The method can be used to implement
adaptive tuning, to choose between alternative
algorithms and policies, or to select the best fixed
settings for parameters which are not modified. The
method was validated and proved practical by an
investigation of two parameters governing core quantum
allocation on a Sperry Univac 1100 system. This
experiment yielded significant results, which are
presented and discussed. Directions for future research
include automating the method, determining the effect
of simultaneous modifications to unrelated control
parameters, and detecting dominant control
parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:1981:VVT,
author = "Y. T. Wang",
title = "On the {VAX\slash VMS} time-critical process
scheduling",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "11--18",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805469",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The VAX/VMS process schedule is briefly described. A
simple priority-driven round-robin queuing model is
then constructed to analyze the behavior of the
time-critical processes of VAX/VMS under such a
scheduler. Mean and variance of the conditional response
time of a process at a given priority are derived,
conditioned on the amount of service time required by
that process. Numerical results are given with
comparisons to the ordinary priority queuing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Soderlund:1981:ECP,
author = "Lars S{\"o}derlund",
title = "Evaluation of concurrent physical database
reorganization through simulation modeling",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "19--32",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805470",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a database system commonly
deteriorates due to degradation of the database's
physical data structure. The structure degradation is a
consequence of the normal operations of a general
database management system. When system performance has
degraded below acceptable limits the database must be
reorganized. In conventional, periodic reorganization
the database, or part of it, is taken off line while
the data structure is being reorganized. This paper
presents results from a study where it is shown that
concurrent reorganization, i.e. a continuous
reorganization of the physical data structure while
application processes have full access to the database,
is an attractive alternative to conventional
reorganization. The paper also presents a solution to a
methodological problem concerning the simulation of a
system which has activities with extremely varying
durations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazowska:1981:AMD,
author = "Edward D. Lazowska and John Zahorjan",
title = "Analytic modelling of disk {I/O} subsystems: a
tutorial",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "33--35",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805471",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is a summary of a tutorial presented during the
conference discussing a number of approaches to
representing disk I/O subsystems in analytic models of
computer systems. As in any analytic modelling study,
the fundamental objective in considering an I/O
subsystem is to determine which devices should be
represented in the model, and what their loadings
should be. The device loadings represent the service
required by jobs, and are the basic parameters needed
by the computational algorithm which calculates
performance measures for the model. To set these
parameters, knowledge of service times at the various
devices in the I/O subsystem is required. The tutorial
begins by distinguishing analytic modelling from
alternative approaches, by identifying the parameter
values that are required for an analytic modelling
study, and by explaining the role of the computational
algorithm that is employed (Denning \& Buzen [1978]
provide a good, although lengthy, summary). We then
consider a sequence of models of increasingly complex
I/O subsystems. Next we discuss I/O subsystems with
rotational position sensing. We then discuss approaches
to modelling shared DASD, emphasizing hierarchical
techniques in which high-level models of each system can
be analyzed in isolation. We also mention recent
techniques for modelling complex I/O subsystems
involving multipathing. Finally, we discuss the
analysis of I/O subsystems based on broadcast channels
such as Ethernet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
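
The parameters the tutorial above refers to as device loadings are, in the
usual formulation, the service demands $D_k = V_k S_k$ (visits per job times
service time per visit), where for a simple disk without rotational position
sensing one commonly takes
$$ S \approx \bar{t}_{\mathrm{seek}} + \tfrac{1}{2} t_{\mathrm{rotation}} +
   \frac{\mathrm{bytes\ transferred}}{\mathrm{transfer\ rate}}, $$
and device utilizations then follow from the utilization law $U_k = X D_k$.
This is the standard textbook parameterization, sketched here only for
orientation; the tutorial's own treatment is more detailed.
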
@Article{Dowdy:1981:MUS,
author = "Lawrence W. Dowdy and Hans J. Breitenlohner",
title = "A model of {Univac 1100\slash 42} swapping",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "36--47",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805472",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a computer system depends upon the
efficiency of its swapping mechanisms. The swapping
efficiency is a complex function of many variables. The
degree of multiprogramming, the relative loading on the
swapping devices, and the speed of the swapping devices
are all interdependent variables that affect swapping
performance. In this paper, a model of swapping
behavior is given. The interdependencies between the
degree of multiprogramming, the swapping devices'
loadings, and the swapping devices' speeds are modeled
using an iterative scheme. The validation of a model is
its predictive capability. The given swapping model was
applied to a Univac 1100/42 system to predict the
effect of moving the swapping activity from drums to
discs. When the swapping activity was actually moved,
throughput increased by 20\%. The model accurately
predicted this improvement. Subtopics discussed
include: (1) the modeling of blocked and overlapped
disc seek activity, (2) the usefulness of empirical
formulae, and (3) the calibration of unmeasurable
parameters. Extensions and further applications of the
model are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Closed queuing networks; Model validation; Parameter
interdependencies; Performance prediction; Swapping",
}
@Article{Turner:1981:SFP,
author = "Rollins Turner and Henry Levy",
title = "Segmented {FIFO} page replacement",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "48--51",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805473",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A fixed-space page replacement algorithm is presented.
A variant of FIFO management using a secondary FIFO
buffer, this algorithm provides a family of performance
curves lying between FIFO and LRU. The implementation
is simple, requires no periodic scanning, and uses no
special hardware support. Simulations are used to
determine the performance of the algorithm for several
memory reference traces. Both the fault rates and
overhead cost are examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "FIFO page replacement; LRU page replacement; Page
replacement algorithms; Performance evaluation",
}
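
A minimal sketch of the policy family described in the Turner--Levy entry
above: a primary FIFO of frames backed by a secondary FIFO buffer whose pages
remain resident and can be reclaimed cheaply. Buffer sizes, the reference
trace, and implementation details are illustrative assumptions, not the
paper's simulator.

  from collections import deque

  def segmented_fifo(trace, primary_size, secondary_size):
      primary, secondary = deque(), deque()
      faults = 0
      for page in trace:
          if page in primary:
              continue                       # hit: FIFO does not reorder
          if page in secondary:
              secondary.remove(page)         # cheap reclaim, not a fault
          else:
              faults += 1                    # true page fault
          primary.append(page)
          if len(primary) > primary_size:
              secondary.append(primary.popleft())   # demote oldest page
              if len(secondary) > secondary_size:
                  secondary.popleft()        # finally evicted from memory
      return faults

  print(segmented_fifo([1, 2, 3, 1, 4, 2, 5, 1, 2, 3, 4, 5],
                       primary_size=2, secondary_size=2))

With secondary_size = 0 the policy degenerates to plain FIFO over the primary
frames, while enlarging the secondary buffer moves its behaviour toward LRU,
which matches the family of curves between FIFO and LRU that the paper reports.
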
@Article{Ferrari:1981:GMW,
author = "Domenico Ferrari",
title = "A generative model of working set dynamics",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "52--57",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805474",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An algorithm for generating a page reference string
which exhibits a given working set size behavior in the
time domain is presented, and the possible applications
of such a string are discussed. The correctness of the
algorithm is proved, and its computational complexity
found to be linear in the length of the string. A
program implementing the algorithm, which is performed
in one pass and requires very little space, is briefly
described, and some experimental results are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zahorjan:1981:BJB,
author = "J. Zahorjan and K. C. Sevcik and D. L. Eager and B. I.
Galler",
title = "Balanced job bound analysis of queueing networks",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "58--58",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805475",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Applications of queueing network models to computer
system performance prediction typically involve the
computation of their equilibrium solution. When
numerous alternative systems are to be examined and the
numbers of devices and customers are large, however,
the expense of computing the exact solutions may not be
warranted by the accuracy required. In such situations,
it is desirable to be able to obtain bounds on the
system solution with very little computation.
Asymptotic bound analysis (ABA) is one technique for
obtaining such bounds. In this paper, we introduce
another bounding technique, called balanced job bounds
(BJB), which is based on the analysis of systems in
which all devices are equally utilized. These bounds
are tighter than ABA bounds in many cases, but they are
based on more restrictive assumptions (namely, those
that lead to separable queueing network models).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
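
In their commonly quoted form (for a batch workload of $N$ jobs, total demand
$D = \sum_k D_k$, maximum per-device demand $D_{\max}$, and average demand
$D_{\mathrm{avg}} = D/K$ over $K$ devices), the balanced job bounds sketched in
the abstract above are
$$ \frac{N}{D + (N-1) D_{\max}} \;\le\; X(N) \;\le\;
   \min\!\left( \frac{1}{D_{\max}},\; \frac{N}{D + (N-1) D_{\mathrm{avg}}} \right), $$
which tighten the asymptotic bounds $X(N) \le \min(N/D,\ 1/D_{\max})$ at the
price of assuming a separable network. The paper's own statement (e.g., with
think time included) may be more general than this sketch.
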
@Article{Neuse:1981:SHA,
author = "D. Neuse and K. Chandy",
title = "{SCAT}: a heuristic algorithm for queueing network
models of computing systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "59--79",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805476",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a new algorithm for the
approximate analysis of closed product-form queueing
networks with fixed-rate, delay (infinite-server), and
load-dependent queues. This algorithm has the accuracy,
speed, small memory requirements, and simplicity
necessary for inclusion in a general network analysis
package. The algorithm allows networks with large
numbers of queues, job classes, and populations to be
analyzed interactively even on microcomputers with very
limited memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Analytic models; Approximations; Iterative algorithms;
Load-dependent queues; Performance analysis;
Product-form; Queueing networks",
}
@Article{Zahorjan:1981:SSQ,
author = "John Zahorjan and Eugene Wong",
title = "The solution of separable queueing network models
using mean value analysis",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "80--85",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805477",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Because it is more intuitively understandable than the
previously existing convolution algorithms, Mean Value
Analysis (MVA) has gained great popularity as an exact
solution technique for separable queueing networks.
However, the derivations of MVA presented to date apply
only to closed queueing network models. Additionally,
the problem of the storage requirement of MVA has not
been dealt with satisfactorily. In this paper we
address both these problems, presenting MVA solutions
for open and mixed load independent networks, and a
storage maintenance technique that we postulate is the
minimum possible of any ``reasonable'' MVA technique.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
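
The closed, single-class case of the MVA recursion discussed above can be
sketched in a few lines; this is the textbook recursion for load-independent
centers plus an optional think time, not the open/mixed extension or the
storage-reduction technique the paper proposes, and the demands below are
invented.

  def mva(demands, population, think_time=0.0):
      """Exact single-class MVA for load-independent service centers."""
      q = [0.0] * len(demands)              # mean queue lengths at population 0
      x, r = 0.0, [0.0] * len(demands)
      for n in range(1, population + 1):
          r = [d * (1.0 + qk) for d, qk in zip(demands, q)]   # R_k = D_k (1 + Q_k(n-1))
          x = n / (think_time + sum(r))                       # throughput X(n)
          q = [x * rk for rk in r]                            # Little's law per center
      return x, sum(r), q                   # throughput, response time, queue lengths

  x, r, q = mva(demands=[0.10, 0.20, 0.05], population=10, think_time=1.0)
  print("X =", x, " R =", r)
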
@Article{Thomasian:1981:ASQ,
author = "Alexander Thomasian and Behzad Nadji",
title = "Aggregation of stations in queueing network models of
multiprogrammed computers",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "86--104",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805478",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In queueing network models the complexity of the model
can be reduced by aggregating stations. This amounts to
obtaining the throughput of the flow-equivalent station
for the subnetwork of stations to be aggregated. When
the subnetwork has a separable solution, aggregation
can be carried out using the Chandy--Herzog--Woo
theorem. The throughput of the subnetwork can be
expressed explicitly in terms of its parameters when
the stations are balanced (have equal utilizations).
This expression for throughput can be used as an
approximation when the stations are relatively
unbalanced. The basic expression can be modified to
increase the accuracy of the approximation. A
generating function approach was used to obtain upper
bounds on the relative error due to the basic
approximation and its modifications. Provided that the
relative error bound is tolerable, a set of unbalanced
stations can be replaced by a single aggregate station
or a set of balanced stations. Finally, we propose a
methodology to simplify the queueing network model of a
large-scale multiprogrammed computer, which makes use
of the previous aggregation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schwetman:1981:CSM,
author = "Herb Schwetman",
title = "Computer system models: an introduction",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "105--105",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805479",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A system model is a tool used to predict system
performance under changing conditions. There are two
widely used modeling techniques: one based on discrete
event simulation and one based on queuing theory
models. Because queueing theory models are so much
cheaper to implement and use, as compared to simulation
models, there is growing interest in them. Users are
developing and using queuing theory models to project
system performance, project capacity, analyze
bottlenecks and configure systems. This talk uses an
operational analysis approach to develop system models.
This approach, as presented in Denning and Buzen [1],
provides an intuitive basis for analyzing system
performance and constructing system models. Very simple
calculations lead to estimates of bounds on performance
--- maximum job throughput rates and minimum message
response times. The emphasis is on gaining an
understanding of system models which reinforces
intuition, not on mathematical formulae. Several
examples are included. References to other works and
publications are provided. Application areas and
limitations of modeling techniques are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
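
The ``very simple calculations'' the talk above refers to are, in the
operational framework of Denning and Buzen [1] cited in the abstract, the
utilization law $U_k = X D_k$, the interactive response-time law
$R = N/X - Z$, and the asymptotic bounds
$$ X(N) \le \min\!\left( \frac{N}{D + Z},\ \frac{1}{D_{\max}} \right), \qquad
   R(N) \ge \max\!\left( D,\ N D_{\max} - Z \right), $$
for $N$ users with think time $Z$, total demand $D = \sum_k D_k$, and
bottleneck demand $D_{\max}$. This is the standard formulation, included here
only as orientation for the entry above.
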
@Article{Denning:1981:PEE,
author = "Peter J. Denning",
title = "Performance evaluation: {Experimental} computer
science at its best",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "106--109",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805480",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "What is experimental computer science? This question
has been widely discussed ever since the Feldman Report
was published (1979 [18]). Many computer scientists
believe that survival of their discipline is intimately
linked to their ability to rejuvenate experimentation.
The National Science Foundation instituted the
Coordinated Experimental Research Program (CERP) in
1979 to help universities set up facilities capable of
supporting experimental research. Other agencies of
government are considering similar programs. Some
industrial firms are offering similar help through
modest cash grants and equipment discounts. What is
experimental computer science? Surprisingly, computer
scientists disagree on the answer. A few believe that
computer science is in flux --- making a transition
from theoretical to experimental science --- and,
hence, no operational definition is yet available. Some
believe that it is all the non-theoretical activities
of computer science, especially those conferring
``hands-on'' experience. Quite a few believe that it is
large system development projects --- i.e., computer
and software engineering --- and they cite MIT's
Multics, Berkeley's version of Bell Labs' UNIX, the
ARPAnet, IBM's database System R, and Xerox's
Ethernet-based personal computer network as examples.
These beliefs are wrong. There are well-established
standards for experimental science. The field of
performance evaluation meets these standards and
provides examples of experimental science for the rest
of the computing field.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rafii:1981:SAM,
author = "Abbas Rafii",
title = "Structure and application of a measurement tool ---
{SAMPLER\slash 3000}",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "110--120",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805481",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Design, internal structure, implementation experience
and a number of unique features of the SAMPLER/3000
performance evaluation tool are presented. This package
can be used to produce program CPU and wait time
profiles in several levels of detail in terms of code
segments, procedure names and procedure relative
addresses. It also provides an accurate profile of the
operating systems code which is exercised to service
requests from the selective parts of the user code.
Programs can be observed under natural load conditions
in a single user or shared environment. A program's CPU
usage is determined in terms of direct and indirect
cost components. The approaches to determine direct and
indirect CPU times are described. A program counter
sampling technique in virtual memory domain is
discussed. Certain interesting aspects of data analysis
and on-line data presentation techniques are described.
The features of the computer architecture, the services
of the loader and compilers which relate to the
operation of the tool are discussed. A case study is
finally presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tolopka:1981:ETM,
author = "Stephen Tolopka",
title = "An event trace monitor for the {VAX 11\slash 780}",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "121--128",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805482",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an event trace monitor
implemented on Version 1.6 of the VMS operating system
at Purdue University. Some necessary VMS terminology is
covered first. The operation of the data gathering
mechanism is then explained, and the events currently
being gathered are listed. A second program, which
reduces the data gathered by the monitor to usable
form, is next examined, and some examples depicting its
operation are given. The paper concludes with a brief
discussion of some of the monitor's uses.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Artis:1981:LFD,
author = "H. Pat Artis",
title = "A log file design for analyzing secondary storage
occupancy",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "129--135",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805483",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A description of the design and implementation of a
log file for analyzing the occupancy of secondary
storage on IBM computer systems is discussed. Typical
applications of the data contained in the log are also
discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sanguinetti:1981:ESS,
author = "John Sanguinetti",
title = "The effects of solid state paging devices in a large
time-sharing system",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "136--153",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805484",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports the results of some measurements
taken on the effects two new solid state paging
devices, the STC 4305 and the Intel 3805, have on
paging performance in the Michigan Terminal System at
the University of Michigan. The measurements were taken
with a software monitor using various configurations of
the two solid state devices and the fixed head disk,
which they replace. Measurements were taken both during
regular production and using an artificial load created
to exercise the paging subsystem. The results confirmed
the expectation that the solid state paging devices
provide shorter page-in waiting times than the
fixed-head disk, and also pointed up some of the
effects which their differing architectures have on the
system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:1981:VMB,
author = "Richard T. Wang and J. C. Browne",
title = "Virtual machine-based simulation of distributed
computing and network computing",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "154--156",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805485",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes the use of virtual machine
architectures as a means of modeling and analyzing
networks and distributed computing systems. The
requirements for such modeling and analysis are
explored and defined along with an illustrative study
of an X.25 link-level protocol performance under normal
execution conditions. The virtualizable architecture
used in this work is the Data General Nova 3/D.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huslende:1981:CEP,
author = "Ragnar Huslende",
title = "A combined evaluation of performance and reliability
for degradable systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "157--164",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805486",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As the field of fault-tolerant computing is maturing
and results from this field are taken into practical
use the effects of a failure in a computer system need
not be catastrophic. With good fault-detection
mechanisms it is now possible to cover a very high
percentage of all the possible failures that can occur.
Once a fault is detected, systems are designed to
reconfigure and proceed either with full or degraded
performance depending on how much redundancy is built
into the system. It should be noted that one particular
failure may have different effects depending on the
circumstances and the time at which it occurs. Today we
see that large numbers of resources are being tied
together in complex computer systems, either locally or
in geographically distributed systems and networks. In
such systems it is obviously very undesirable that the
failure of one element can bring the entire system
down. On the other hand one can usually not afford to
design the system with sufficient redundancy to mask
the effect of all failures immediately.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jacobson:1981:MSD,
author = "Patricia A. Jacobson and Edward D. Lazowska",
title = "The method of surrogate delays: {Simultaneous}
resource possession in analytic models of computer
systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "165--174",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805487",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a new approach to modelling the
simultaneous or overlapped possession of resources in
queueing networks. The key concept is that of iteration
between two models, each of which includes an explicit
representation of one of the simultaneously held
resources and a delay server (an infinite server, with
service time but no queueing) acting as a surrogate for
queueing delay due to congestion at the other
simultaneously held resource. Because of this, we refer
to our approximation technique as the ``method of
surrogate delays''.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jacobson:1981:AAM,
author = "Patricia Jacobson",
title = "Approximate analytic models of arbiters",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "175--180",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805488",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Results at very light and very heavy loads are easy to
obtain, but at intermediate loads performance modelling
is necessary. Because of the considerable cost of
simulation, we develop queueing network models which
can be solved quickly by approximate analytic
techniques. These models are validated by comparing
them with simulations at certain points, and then used to
get a wide range of results quickly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Briggs:1981:PCB,
author = "Fay{\'e} A. Briggs and Michel Dubois",
title = "Performance of cache-based multiprocessors",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "181--190",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805489",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A possible design alternative to improve the
performance of a multiprocessor system is to insert a
private cache between each processor and the shared
memory. The caches act as high-speed buffers, reducing
the memory access time and affecting the delays caused by
memory conflicts. In this paper, we study the
performance of a multiprocessor system with caches. The
shared memory is pipelined and interleaved to improve
the block transfer rate, and assumes an L-M
organization, previously studied under random word
access. An approximate model is developed to estimate
the processor utilization and the speedup improvement
provided by the caches. These two parameters are
essential to a cost-effective design. An example of a
design is treated to illustrate the usefulness of this
investigation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bryant:1981:QNA,
author = "R. M. Bryant and J. R. Agre",
title = "A queueing network approach to the module allocation
problem in distributed systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "191--204",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805490",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Given a collection of distributed programs and the
modules they use, the module allocation problem is to
determine an assignment of modules to processors that
minimizes the total execution cost of the programs.
Standard approaches to this problem are based on
solving either a network flow problem or a constrained
$0$-$1$ integer programming problem. In this paper we
discuss an alternative approach to the module
allocation problem where a closed, multiclass queueing
network is solved to determine the cost of a particular
module allocation. The advantage of this approach is
that the execution cost can be expressed in terms of
performance measures of the system such as response
time. An interchange heuristic is proposed as a method
of searching for a good module allocation using this
model and empirical evidence for the success of the
heuristic is given. The heuristic normally finds module
allocations with costs within 10 percent of the optimal
module allocation. Fast, approximate queueing network
solution techniques based on mean-value-analysis allow
each heuristic search to be completed in a few seconds
of CPU time. The computational complexity of each
search is $ O(M K (K + N) C)$ where $M$ is the number
of modules, $K$ is the number of sites in the network,
$N$ is the number of communications processors, and $C$
is the number of distributed program types. It appears
that substantial problems of this type could be solved
using the methods we describe.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Distributed computer systems; File assignment problem;
Mean-value analysis; Multiclass queueing network model;
Task allocation problem",
}
@Article{Marathe:1981:AME,
author = "Madhav Marathe and Sujit Kumar",
title = "Analytical models for an {Ethernet}-like local area
network link",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "205--215",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805491",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ethernet-like local area network links have been
studied by a number of researchers. Most of these
studies have involved extensive simulation models
operating at the level of individual packets. However,
as we begin building models of systems built around
such links, detailed simulation models are neither
necessary, nor cost-effective. Instead, a simple
analytical model of the medium should be adequate as a
component of the higher level system models. This paper
discusses a number of analytical models and identifies
a last-in-first-out M/G/1 model with slightly increased
service time as one which adequately captures both the
mean and the coefficient of variation of the response
time. Given any offered load, this model can be used to
predict the mean waiting time and its coefficient of
variation. These two can be used to construct a
suitable 2-stage hyperexponential distribution. Random
numbers can then be drawn from this distribution for
use as waiting times of individual packets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pechura:1981:PLM,
author = "Michael A. Pechura",
title = "Page life measurements",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "10--12",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Clark:1981:UES,
author = "Jon D. Clark",
title = "An update on economies-of-scale in computing systems",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "13--14",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Janusz:1981:GMS,
author = "Edward R. Janusz",
title = "Getting the most out of a small computer",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "22--35",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cox:1981:DDD,
author = "Springer Cox",
title = "Data, definition, deduction: an empirical view of
operational analysis",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "36--44",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discussed the problems encountered and
techniques used in conducting the performance
evaluation of a multi-processor on-line manpower data
collection system. The two main problems were: (1) a
total lack of available software tools, and (2) many
commonly used hardware monitor measures (e.g., CPU
busy, disk seek in progress) were either meaningless or
not available. The main technique used to circumvent
these problems was detailed analysis of one-word
resolution memory maps. Some additional data collection
techniques were (1) time-stamped channel measurements
used to derive some system component utilization
characteristics and (2) manual stopwatch timings used
to identify the system's terminal response times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Muramatsu:1981:SVQ,
author = "Hiroshi Muramatsu and Masahiro Date and Takanori
Maki",
title = "Structural validation in queueing network models of
computer systems",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "41--46",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The current status of an implementation of a
methodology relating load, capacity and service for IBM
MVS computer systems is presented. This methodology
encompasses systems whose workloads include batch, time
sharing and transaction processing. The implementation
includes workload classification, mix representation
and analysis, automatic benchmarking, and exhaust point
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sauer:1981:NSS,
author = "Charles H. Sauer",
title = "Numerical solution of some multiple chain queueing
networks",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "47--56",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041870",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports the results of simulation
experiment of a model of a virtual memory computer. The
model consists of three major subsystems: Program
Behavior, Memory Allocation and Secondary Storage. By
adapting existing models of these subsystems an overall
model for the computer operation is developed and its
performance is tested for various design alternatives.
The results are reported for different paging devices,
levels of multiprogramming, job mixes, memory
allocation scheme, page service scheduling and page
replacement rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nemeth:1981:AIP,
author = "Thomas A. Nemeth",
title = "An approach to interactive performance analysis in a
busy production system {(NOS/BE)}",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "57--73",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041808.1041815",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many different ideas have been promulgated on
performance evaluation by software and hardware
monitoring or modelling, but most of these have
associated implementation problems in practice. By
adopting a slightly different approach, (using an
approximation to `service wait time'), an analysis of
response is possible in a production system, with
negligible overhead. This analysis allows the actual
areas of contention to be identified, and some rather
unexpected results emerge, with a direct application to
scheduling policy. The work was done using the NOS/BE
operating system on a CDC Cyber 173 at the University
of Adelaide.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "measurement; multiprogramming; performance evaluation;
production; response; scheduling; timesharing",
}
@Article{Knudson:1981:CPE,
author = "Michael E. Knudson",
title = "A computer performance evaluation operational
methodology",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "74--80",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041808.1041816",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A method suggesting how to organize and operate a
Computer Performance and Evaluation (CPE) project is
presented. It should be noted that the suggested
principles could apply to a modeling or simulation
effort.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Afshari:1981:MNT,
author = "P. V. Afshari and S. C. Bruell and R. Y. Kain",
title = "Modeling a new technique for accessing shared buses",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "4--13",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801685",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consider a queueing system in which customers (or
jobs) arrive at one of $Q$ separate queues to await
service from one of $S$ identical servers (Figure 1).
Once a job enters a queue it does not leave that queue
until it has been selected for service. Any server can
serve any job from any queue. A job selected for
service cannot be preempted. In this paper we consider
jobs to be in a single class; for the multiple class
result see [AFSH81a]. We assume once a queue has been
selected, job scheduling from that queue is fair. In
particular, our results hold for first come first serve
as well as random selection [SPIR79] and, for that
matter, any fair nonpreemptive scheduling policy within
a queue. We assume that arrivals to each queue follow a
Poisson process with the mean arrival rate to queue $q$
being $ \lambda_q $. The $S$ identical exponential
servers are each processing work at a mean rate of $
\mu $. This system is general enough to be adaptable
for modeling many different applications. By choosing
the policy employed for queue selection by the servers,
we can model multiplexers, channels, remote job entry
stations, certain types of communication processors
embedded in communication networks, and sets of shared
buses. In this paper we will use the latter application
to discuss a realistic situation. The elements
(``jobs'') in the queues are messages to be sent from
modules connected to the shared bus of the system. The
servers are the buses; their service times are equal to
the message transmission times. The queues are in the
interface modules connected to and sharing the buses.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazar:1981:OCM,
author = "Aurel A. Lazar",
title = "Optimal control of a {M\slash M\slash m} queue",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "14--20",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801686",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of optimal control of a M/M/m queueing
system is investigated. As in the M/M/1 case the
optimum control is shown to be a window type mechanism.
The window size $L$ depends on the maximum allowable
time delay $T$ and can be explicitly computed. The
throughput time delay function of the M/M/m system is
briefly discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spirn:1981:NMB,
author = "Jeffrey R. Spirn",
title = "Network modeling with bursty traffic and finite buffer
space",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "21--28",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801687",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we propose a class of queueing network
models, and a method for their approximate solution,
for computer networks with bursty traffic and finite
buffer space. The model is open, implying no population
limit except for buffer size limits and therefore no
window-type flow control mechanism. Each node of the
computer network is represented as a finite-length
queue with exponential service and an arrival process
which is initially bulk Poisson, but becomes less and
less clustered from hop to hop. Elaborations are
possible to account for varying mean packet sizes and
certain buffer pooling schemes, although these involve
further approximation. The approximations of the method
were validated against several simulations, with
reasonable agreement, and certainly with much less
error than is obtained by modeling a bursty traffic
source as Poisson.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lam:1981:ORN,
author = "Simon S. Lam and Y. Luke Lien",
title = "Optimal routing in networks with flow-controlled
virtual channels",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "38--46",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801688",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Packet switching networks with flow-controlled virtual
channels are naturally modeled as queueing networks
with closed chains. Available network design and
analysis techniques, however, are mostly based upon an
open-chain queueing network model. In this paper, we
first examine the traffic conditions under which an
open-chain model accurately predicts the mean
end-to-end delays of a closed-chain model having the
same chain throughputs. We next consider the problem of
optimally routing a small amount of incremental traffic
corresponding to the addition of a new virtual channel
(with a window size of one) to a network. We model the
new virtual channel as a closed chain. Existing flows
in the network are modeled as open chains. An optimal
routing algorithm is then presented. The algorithm
solves a constrained optimization problem that is a
compromise between problems of unconstrained
individual-optimization and unconstrained
network-optimization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Livny:1981:LBH,
author = "Miron Livny and Myron Melman",
title = "Load balancing in homogeneous broadcast distributed
systems",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "47--55",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801689",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Three different load balancing algorithms for
distributed systems that consist of a number of
identical processors and a CSMA communication system
are presented in this paper. Some of the properties of
a multi-resource system and the balancing process are
demonstrated by an analytic model. Simulation is used
as a means for studying the interdependency between the
parameters of the distributed system and the behaviour
of the balancing algorithm. The results of this study
shed light on the characteristics of the load balancing
process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wecker:1981:PGD,
author = "Stuart Wecker and Robert Gordon and James Gray and
James Herman and Raj Kanodia and Dan Seligman",
title = "Performance of globally distributed networks",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "58--58",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801690",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the design and implementation of computer networks
one must be concerned with their overall performance
and the efficiency of the communication mechanisms
chosen. Performance is a major issue in the
architecture, implementation, and installation of a
computer communication network. The architectural
design always involves many cost/performance tradeoffs.
Once implemented, one must verify the performance of
the network and locate bottlenecks in the structure.
Configuration and installation of a network involves
the selection of a topology and communication
components, channels and nodes of appropriate capacity,
satisfying performance requirements. This panel will
focus on performance issues involved in the efficient
design, implementation, and installation of globally
distributed computer communication networks.
Discussions will include cost/performance tradeoffs of
alternative network architecture structures, methods
used to measure and isolate implementation performance
problems, and configuration tools to select network
components of proper capacity. The panel members have
all been involved in one or more performance issues
related to the architecture, implementation, and/or
configuration of the major networks they represent.
They will describe their experiences relating to
performance issues in these areas. Methodologies and
examples will be chosen from these networks in current
use. There will be time at the end of the session for
questions to the panel.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gordon:1981:OMH,
author = "R. L. Gordon",
title = "Operational measurements on a high performance ring",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "59--59",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801691",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Application and system software architecture can
greatly influence the operational statistics of a local
network. The implementation of a transparent file
system on top of a high bandwidth local network has
generated a high degree of file traffic
over the local network whose characteristics are
largely fixed and repeatable. These statistics will be
presented along with arguments for and against
designing mechanisms that optimize specifically for
that class of traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Local networks; Performance; Remote files",
}
@Article{Gray:1981:PSL,
author = "James P. Gray",
title = "Performance of {SNA}'s {LU-LU} session protocols",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "60--61",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "SNA is both an architecture and a set of products
built in conformance with the architecture (1,2,3). The
architecture is layered and precisely defined; it is
both evolutionary and cost effective for implementing
products. Perhaps the largest component of cost
effectiveness is performance: transaction throughput
and response times. For SNA, this involves data link
control protocols (for SDLC and S/370 channel DLC's),
routing algorithms, protocols used on the sessions that
connect logical units (LU-LU session protocols), and
interactions among them. SNA's DLC and routing
protocols have been discussed elsewhere (4,5,6); this
talk examines protocols on sessions between logical
units (LU-LU session protocols) and illustrates the
results of design choices by comparing the performance
of various configurations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Herman:1981:APT,
author = "James G. Herman",
title = "{ARPANET} performance tuning techniques",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "62--62",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As part of its operation and maintenance of the
ARPANET for the past twelve years, BBN has been asked
to investigate a number of cases of degradation in
network performance. This presentation discusses the
practical methods and tools used to uncover and correct
the causes of these service problems. A basic iterative
method of hypothesis generation, experimental data
gathering, and analysis is described. Emphasis is
placed on the need for experienced network analysts to
direct the performance investigation and for the
availability of network programmers to provide special
purpose modifications to the network node software in
order to probe the causes of the traffic patterns under
observation. Many typical sources of performance
problems are described, a detailed list of the tools
used by the analyst is given, and a list of basic
techniques is provided. Throughout the presentation
specific examples from actual ARPANET performance
studies are used to illustrate the points made.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aleh:1981:DUB,
author = "Avner Aleh and K. Dan Levin",
title = "The determination of upper bounds for economically
effective compression in packet switching networks",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "64--72",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801694",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper deals with the economic tradeoffs
associated with data compression in a packet switching
environment. In section II we present the data profile
concept and the compression analysis of typical
file-transfer data strings. This is followed by a
compression cost saving model that is developed in
section III. Upper bounds for an economically effective
compression service are derived there, and the paper
concludes with an example of these bounds based on
state of the art technology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McGregor:1981:CMP,
author = "Patrick V. McGregor",
title = "Concentrator modeling with pipelining arrivals
compensation",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "73--94",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801695",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A general model of Intelligent Communications
Concentrating Devices (ICCD) is presented and analyzed
for delay and overflow performance with compensation
for the pipelining effect of message arrivals extending
over time. The results of the analysis indicate that,
for the same trunk utilization, the trend towards
buffered terminals with longer messages requires
substantially greater buffering in the ICCD. The
nominal environment analyzed consisted of 10--40 medium
speed terminals (1200 b/s--9600 b/s) operating over a
medium speed trunk (9600 b/s) with trunk utilizations
from 20 percent to 80 percent and average message
lengths up to 1000 characters. This is a substantially
different environment than that typically served by
current implementations of ICCDs, which are frequently
reported to have throughput improvements of 2-3 times
the nominal originating terminal bandwidths, as opposed
to the typical factor of 5 for the analyzed
environment. This does not reflect on the
appropriateness of the ICCDs in serving the new
environment, but rather is simply stating that in the
new environment the same character volume of traffic
may be appearing with different traffic characteristics
over higher speed access lines. If the new environment
shows only a difference in traffic characteristics and
originating line speed, without change in the traffic
control scheme (or lack of scheme), the results
indicate essentially reproduction of a large part of
the terminal buffering in the ICCD for adequate
overflow performance. Alternatively, with smarter
terminals, traffic control schemes (flow control) may
enable the ICCD to be reduced to an essentially
unbuffered ``traffic cop,'' with the terminal buffering
also serving as the shared facility buffering. Several
practical implementations of ICCDs have provision for
flow control, but require cooperating terminals and
hosts. This suggests that ICCD design and application
will become more sensitive to the practical operating
features of the target environment than has been
generally the case to date. The analysis presented in
this paper involves many simplifications to the actual
problem. Accommodating non-exponential message length
distributions and heterogeneous terminal configurations
are perhaps two of the more immediate
problems that may be effectively dealt with.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mink:1981:MEC,
author = "Alan Mink and Charles B. {Silio, Jr.}",
title = "Modular expansion in a class of homogeneous networks",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "95--100",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a special class of homogeneous computer
network comprising several essentially identical but
independent computing systems (ICSs) sharing a single
resource. Of interest here are the effects of modularly
expanding the network by adding ICSs. We use a
previously presented approximate queueing network model
to analyze modular expansion in this class of network.
The performance measure used in this analysis is the
mean cycle time, which is the mean time between
successive requests for service by the same job at the
CPU of an ICS. In this analysis we derive an
intuitively satisfying mathematical relation between
the addition of ICSs and the incremental increase in
the service rate of the shared resource required to
maintain the existing level of system performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thareja:1981:UBA,
author = "Ashok K. Thareja and Satish K. Tripathi and Richard A.
Upton",
title = "On updating buffer allocation",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "101--110",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most of the analysis of buffer sharing schemes has
been aimed at obtaining the optimal operational
parameters under stationary load situations. It is well
known that in most operating environments the traffic
load changes. In this paper, we address the problem of
updating buffer allocation as the traffic load at a
network node changes. We investigate the behavior of a
complete partitioning buffer sharing scheme to gain
insight into the dependency of the throughput upon
system parameters. The summary of the analysis is
presented in the form of a heuristic. The heuristic is
shown to perform reasonably well under two different
types of stress tests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elsanadidi:1981:ATW,
author = "M. Y. Elsanadidi and Wesley W. Chu",
title = "An analysis of a time window multiaccess protocol with
collision size feedback {(WCSF)}",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "112--118",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801698",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the performance of a window multiaccess
protocol with collision size feedback. We obtain bounds
on the throughput and the expected packet delay, and
assess the sensitivity of the performance to collision
recognition time and packet transmission time. An
approximate optimal window reduction factor to minimize
packet isolation time is {equation}, where $n$ is the
collision size and $R$ the collision recognition time
(in units of packet propagation delay). The WCSF
protocol, which requires more information than CSMA-CD,
is shown to have at least 30\% more capacity than
CSMA-CD for high bandwidth channels; that is, when
packet transmission time is comparable to propagation
delay. The capacity gain of the WCSF protocol decreases
as the propagation delay decreases and the collision
recognition time increases. Our study also reveals the
inherent stability of WCSF. When the input load
increases beyond saturation, the throughput remains at
its maximum value.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Roehr:1981:PALa,
author = "Kuno M. Roehr and Horst Sadlowski",
title = "Performance analysis of local communication loops",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "119--129",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801699",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The communication loops analyzed here provide an
economic way of attaching many different terminals
which may be some kilometers away from a host
processor. Main potential bottlenecks were found to be
the loop transmission speed, the loop adapter
processing rate, and the buffering capability, all of
which are analyzed in detail. The buffer overrun
probabilities are found by convolving individual buffer
usage densities and by summing over the tail-end of the
obtained overall density function. Examples of analysis
results are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sherman:1981:DVH,
author = "R. H. Sherman and M. G. Gable and A. W. Chung",
title = "Distributed virtual hosts and networks: {Measurement}
and control",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "130--136",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801700",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Diverse network application requirements bring about
local networks of various size, degree of complexity
and architecture. The purpose of this paper is to
present a network protocol layer which is used to
provide a homogeneous operating environment and to
ensure the availability of network resources. The
network layer process probes the underlying local
network to discover its properties and then adapts to
changing network conditions. The principal contribution
of this paper is to generalize properties of diverse
local networks which can be measured. This is important
when considering maintenance and service of various
communication links. Three types of links are
point-to-point links, multi-drop, loop or switched
links and multi-access contention data buses. A
prototype network is used to show a complexity
improvement in the number of measurement probes
required using a multi-access contention bus. Examples
of measurement techniques and network adaptation are
presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brice:1981:NPA,
author = "Richard Brice and William Alexander",
title = "A network performance analyst's workbench",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "138--146",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801701",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance measurement and analysis of the behavior
of a computer network usually requires the application
of multiple software and hardware tools. The location,
functionality, data requirements, and other properties
of the tools often reflect the distribution of
equipment in the network. We describe how we have
attempted to organize a collection of tools into a
single system that spans a broad subset of the
measurement and analysis activities that occur in a
complex network of heterogeneous computers. The tools
are implemented on a pair of dedicated midicomputers. A
database management system is used to couple the data
collection and analysis tools into a system highly
insulated from evolutionary changes in the composition
and topology of the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DuBois:1981:HMS,
author = "Donald F. DuBois",
title = "A {Hierarchical Modeling System} for computer
networks",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "147--155",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801702",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the Hierarchical Modeling System
(HMS). HMS is a tool --- a unified and expandable
system --- which supports the development of analytic
and simulator models of computer networks. The same
system and workload descriptions can be interpreted as
analytic queueing models with optimization techniques
or as discrete event simulation models. The rationale
behind the development of HMS is that high level
analyses incorporating analytic techniques may be used
in the early design phase for networks when many
options are considered while detailed simulation
studies of fewer design alternatives are appropriate
during the later stages.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Analytic models; Computer networks; Hierarchical
models; Performance evaluation; Simulation",
}
@Article{Terplan:1981:NPR,
author = "K. Terplan",
title = "Network performance reporting",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "156--170",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801703",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Managing networks using Network Administration Centers
is increasingly considered. After introducing the
information demand for operational, tactical and
strategic network management the paper is dealing with
the investigation of the applicability of tools and
techniques for these areas. Network monitors and
software problem determination tools are investigated
in greater detail. Also implementation details for a
multihost-multinode network including software and
hardware tools combined by SAS are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spiegel:1981:QLA,
author = "Mitchell G. Spiegel",
title = "Questions for {Local Area Network} panelists",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "172--172",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801704",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Much has been written and spoken about the
capabilities of emerging designs for Local Area
Networks (LAN's). The objective for this panel session
was to gather together companies and agencies that have
brought LAN's into operation. Questions about the
performance of LANs have piqued the curiosity of the
computer/communications community. Each member of the
panel briefly described his or her LAN installation and
workload as a means of introduction to the audience.
Questions about performance were arranged into a
sequence by performance attributes. Those attributes
thought to be of greatest importance were discussed
first. Discussion on the remainder of the attributes
continued as time and audience interaction permitted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Roehr:1981:PALb,
author = "Kuno M. Roehr and Horst Sadlowski",
title = "Performance analysis of local communication loops",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "173--173",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801705",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The communication loops analyzed here provide an
economical way of attaching many different terminals to
an IBM 4331 host processor, which may be several
kilometers away. As a first step of the investigation
protocol overhead is derived. It consists of request
and transmission headers and the associated
acknowledgements as defined by the System Network
Architecture. Additional overhead is due to the
physical layer protocols of the Synchronous Data Link
Control including lower level confirmation frames. The
next step is to describe the performance
characteristics of the loop attachment hardware,
primarily consisting of the external loop station
adapters for local and teleprocessing connections and
the loop adapter processor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sternick:1981:SAD,
author = "Barbara R. Sternick",
title = "Systems aids in determining {Local Area Network}
performance characteristics",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "174--174",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801706",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "At Bethesda, Maryland, the National Library of
Medicine has a large array of heterogeneous data
processing equipment dispersed over ten floors in the
Lister Hill Center and four floors in the Library
Building. The National Library of Medicine decided to
implement a more flexible, expansible access medium
(Local Area Network (LAN)) to handle the rapid growth
in the number of local and remote users and the
changing requirements. This is a dual coaxial cable
communications system designed using cable television
(CATV) technology. One cable, the outbound cable,
transfers information between the headend and the user
locations. The other cable, the inbound cable,
transfers information from the user locations to the
headend. This system will permit the distribution of
visual and digital information on a single medium.
On-line devices, computers, and a technical control
system network control center are attached to the LAN
through BUS Interface Units (BIUs). The technical
control system will collect statistical and status
information concerning the traffic, BIUs, and system
components. The BIUs will, at fixed intervals, transmit
status information to the technical control. The
Network Control Centers (NCC) will provide network
directory information for users of the system,
descriptions of the services available, etc. An X.25
gateway BIU will interface the LAN to the public
networks (Telenet and Tymnet) and to X.25 host computer
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anonymous:1981:AI,
author = "Anonymous",
title = "Authors Index",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "175--175",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801707",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajaraman:1982:PET,
author = "M. K. Rajaraman",
title = "Performance evaluation through job scheduler
modeling",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "9--15",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010673.800501",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The scheduler in the Cyber-176 computer does the major
functions of routing the job through the system,
controlling job's progress through aging and swapping
of jobs between various queues and resource allocation
among jobs. This paper reports some results of the
performance evaluation study of the Cyber-176 by
modeling the scheduler as the heart of the system. The
study explores the effects of varying the scheduler
parameters in the performance of the machine in a
particular installation. The basic theme of the paper
is that the selection of parameters in a laboratory or
a system test environment may not always result in the
best performance in an actual installation. The
simulation provides vital information for installation
management and tuning the operating system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mager:1982:TPA,
author = "Peter S. Mager",
title = "Toward a parametric approach for modeling local area
network performance",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "17--28",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010673.800502",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The task of modeling the performance of a single
computer (host) with associated peripheral devices is
now well understood [Computer 80]. In fact, highly
usable tools based on analytical modeling techniques
are commercially available and in widespread use
throughout the industry. [Buzen 78] [Buzen 81] [Won 81]
These tools provide a mechanism for describing
computerized environments and the workloads to be
placed on them in a highly parameterized manner. This
is important because it allows users to describe their
computer environments in a structured way that avoids
unnecessary complexity. It also is helpful in
facilitating intuitive interpretations of modeling
results and applying them to capacity planning
decisions. A first step toward building a modeling tool
and associated network specification language that
allows straightforward, inexpensive, and interpretable
modeling of multi-computer network performance is to
identify the set of characteristics (parameters) that
most heavily influence that performance. The result of
such a study for the communication aspects of local
area networks is the subject of this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaffney:1982:SSI,
author = "John E. {Gaffney, Jr.}",
title = "Score `82 --- a summary (at {IBM Systems Research
Institute}, 3\slash 23-3\slash 24\slash 82)",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "30--32",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010673.800503",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "``Score `82'', the first workshop on software counting
rules, was attended by practitioners who are working
with ``software metrics''. The concern was with
methodologies for counting such software measurables as
the number of ``operators'', ``operands'' or the number
of lines of code in a program. A ``metric'' can be a
directly countable ``measurable'' or a quantity
computable from one or several such ``measurables''.
``Metrics'' quantify attributes of the software
development process, the software itself, or some
aspect of the interaction of the software with the
processor that hosts it. In general, a ``metric''
should be useful in the development of software and in
measuring its quality. It should have some theory to
support its existence, and it should be based on actual
software data. This workshop was concerned principally
with the data aspects of ``metrics'', especially with
the rules underlying the collection of the data from
which they are computed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Misek-Falkoff:1982:NFS,
author = "Linda D. Misek-Falkoff",
title = "The new field of {``Software Linguistics''}: an
early-bird view",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "35--51",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800002.800504",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The phrase ``Software Linguistics'' is applied here to
a text-based perspective on software quality matters.
There is much in the new work on Software Metrics
generally, and Software Science in particular, that is
reminiscent of the activities of Natural Language
analysis. Maurice Halstead held that Software Science
could shed light on Linguistics; this paper sketches
some mutually informing reciprocities between the two
fields, and across related areas of textual, literary,
discourse, and communications analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Ease of use; Linguistics; Metrics; Natural language
analysis; Quality; Software science; Text complexity",
}
@Article{Spiegel:1982:SCR,
author = "Mitchell G. Spiegel",
title = "Software counting rules: {Will} history repeat
itself?",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "52--56",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800002.800505",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Counting rules in the software metrics field have been
developed for counting such software measurables as the
occurrence of operators, operands and the number of
lines of code. A variety of software metrics, such as
those developed by Halstead and others, are computed
from these numbers. Published material in the software
metrics field has concentrated on relationships between
various metrics, comparisons of values obtained for
different languages, etc. Yet little, if anything, has
been published on assumptions, experimental designs, or
the nature of the counting tools (or programs)
themselves used to obtain the basic measurements from
which these metrics are calculated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kavi:1982:EDS,
author = "Krishna M. Kavi and U. B. Jackson",
title = "Effect of declarations on software metrics: an
experiment in software science",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "57--71",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800002.800506",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The attractiveness of software science [HAL77] is to
some extent due to the simplicity of its
instrumentation. Once the detailed rules for counting
operators and operands have been learned, the experiments
and derivations using various algorithms and languages can
be repeated. Proposed or actual applications of
software science are quite varied (For example, see
[SEN79]). The size and construction time of a program
can be estimated from the problem specification and the
choice of programming language. An estimate of the
number of program bugs can be shown to depend on
programming effort. Optimal choice of module sizes for
multimodule implementations can be computed. Elements
of software science have applications to the analysis
of technical prose. The purpose of this experiment is
threefold. First, we want to apply software science
metrics to the language `C'. The second purpose of the
experiment is to study the effect of including
declaration statements while counting operators and
operands. Finally, we have set out to determine whether
the area of application has any influence on software
science metrics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaffney:1982:MIC,
author = "John E. {Gaffney, Jr.}",
title = "{Machine Instruction Count Program}",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "72--79",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800002.800507",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Machine Instruction Count Program (MIC Program)
was originally developed in 1978 to produce `operator'
and `operand' counts of object programs written for the
AN/UYK-7 military computer. In 1981, its capability was
expanded so that it could apply to the AN/UYS-1 (or
``Advanced Signal Processor'') military computer. The
former machine, made by UNIVAC, hosts the IBM-developed
software for the sonar and defensive weapons
system/command system for the TRIDENT missile launching
submarine and the software for the sonar for the new
Los Angeles-class attack submarines. The second
machine, made by IBM, is incorporated into several
military systems including the LAMPS anti-submarine
warfare system. The MIC program has been applied to
collect a large amount of data about programs written
for the AN/UYK-7 and AN/UYS-1 computers. From these
data, several of the well-known software `metrics'(1),
such as `volume', `language level', and `difficulty',
have been calculated. Some of the results obtained have
been reported in the literature (3,4). Probably the
most significant practical use of these data, so far,
has been the development of formulas for estimating
the amount of code to be written(2,5) as a function of
measures of the requirements or of the (top-level)
design that the code is to implement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
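
(Editorial note, not part of the entry above: for readers unfamiliar with
the metrics named in this and the surrounding abstracts, the basic Halstead
quantities are simple functions of four counts. The Python sketch below uses
the usual textbook forms for volume, difficulty, and effort; the hard part,
deciding what counts as an operator or an operand, is exactly what the
SCORE '82 papers debate, so the token lists here are assumed to come from a
tokenizer of the reader's choosing.)

    import math
    from collections import Counter

    def halstead_metrics(operators, operands):
        # operators, operands: sequences of token strings produced by some
        # counting rule; the counting rule itself is the contested part.
        op, od = Counter(operators), Counter(operands)
        eta1, eta2 = len(op), len(od)                # distinct operators, operands
        N1, N2 = sum(op.values()), sum(od.values())  # total occurrences
        volume = (N1 + N2) * math.log2(eta1 + eta2)  # V = N log2(eta)
        difficulty = (eta1 / 2.0) * (N2 / eta2)      # D = (eta1/2)(N2/eta2)
        effort = difficulty * volume                 # E = D * V
        return {"volume": volume, "difficulty": difficulty, "effort": effort}

    # Tiny example: the expression  z = x + x * y
    print(halstead_metrics(["=", "+", "*"], ["z", "x", "x", "y"]))
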
@Article{Misek-Falkoff:1982:UHS,
author = "Linda D. Misek-Falkoff",
title = "A unification of {Halstead}'s {Software Science}
counting rules for programs and {English} text, and a
claim space approach to extensions",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "80--114",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800002.800508",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In his Elements of Software Science, Maurice Halstead
proposed that software quality measurements could be
based on static lexemic analysis of the vocabularies of
operators and operands, and the number of occurrences
of each class, in computer programs. He also proposed
that quality issues in Natural Language text could be
addressed from similar perspectives, although his rules
for programs and for English seem to conflict. This
paper suggests that Halstead's seemingly disparate
rules for classifying the tokens of programs and the
tokens of English can be generally reconciled, although
Halstead himself does not claim such a union. The
thesis of Part One is a unification of his two
procedures, based on a linguistic partitioning between
``open'' and ``closed'' classes. This unification may
provide new inputs to some open issues concerning
coding, and suggest, on the basis of a conceptual
rationale, an explanation as to why programs which are
by Halstead's definition ``impure'' might indeed be
confusing to the human reader. Part Two of this paper,
by exploring the nodes in a textual ``Claim Space,''
briefly considers other groupings of the classes taken
as primitive by Halstead, in ways which bring to light
alternate and supplementary sets of candidate coding
rules productive for study of textual quality.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Linguistics; Metrics; Natural language analysis;
Quality; Software science; Text complexity",
}
@Article{Estes:1982:DPO,
author = "George E. Estes",
title = "Distinguishing the potential operands in {FORTRAN}
programs",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "115--117",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800002.800509",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are several possible relationships between the
number of potential operands and the actual operands
used which correlate with available data (such as
Akiyama's debugging data). However, additional data is
required to distinguish between these hypotheses. Since
there is a large body of programs available written in
FORTRAN, we wish to develop a mechanical counting
procedure to enumerate potential operands in FORTRAN
programs. We are currently developing counting rules
for these potential operands. Subroutine parameters
and input/output variables are relatively easy to
identify. However, a number of FORTRAN features, such
as COMMON blocks and EQUIVALENCE'd variables, introduce
serious complications. Some additional analysis of
usage or heuristic approaches are required to
differentiate potential operands in these situations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Conte:1982:EDC,
author = "S. D. Conte and V. Y. Shen and K. Dickey",
title = "On the effect of different counting rules for control
flow operators on {Software Science} metrics in
{Fortran}",
journal = j-SIGMETRICS,
volume = "11",
number = "2",
pages = "118--126",
month = "Summer",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010673.800510",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Halstead in his Theory of Software Science, proposed
that in the Fortran language, each occurrence of a {\tt
GOTO i} for different label {\tt i}'s be counted as a
unique operator. Several writers have questioned the
wisdom of this method of counting GOTO's. In this
paper, we investigate the effect of counting GOTO's as
several occurrences of a single unique operator on
various software science metrics. Some 412 modules from
the International Mathematical and Statistical
Libraries (IMSL) are used as the data base for this
study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shanthikumar:1982:PCF,
author = "J. G. Shanthikumar and P. K. Varshney and K. Sriram",
title = "A priority cutoff flow control scheme for integrated
voice-data multiplexers",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "8--14",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010675.807790",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the flow control problem
for a movable boundary integrated voice-data
multiplexer. We propose a flow control scheme where a
decision rule based on the data queue length is
employed to cut off the priority of voice to prevent a
data queue buildup. A continuous-time queueing model
for the integrated multiplexer is developed. The
performance of the flow control scheme is obtained
using an efficient computational procedure. A numerical
example is presented for illustration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cox:1982:DDD,
author = "Springer Cox",
title = "Data, definition, deduction: an empirical view of
operational analysis",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "15--20",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010675.807791",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The theoretical aspects of operational analysis have
been considered more extensively than matters of its
application in practical situations. Since its
relationships differ in their applicability, they must
be considered separately when they are applied. In
order to do this, the foundations of three such
relationships are examined from an empirical point of
view. To further demonstrate the intimate connection
between data, definitions, and performance models, the
problem of measurement artifact is considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perros:1982:QLD,
author = "H. G. Perros",
title = "The queue-length distribution of the {M\slash Ck\slash
1} queue",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "21--24",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010675.807792",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The exact closed-form analytic expression of the
probability distribution of the number of units in a
single server queue with Poisson arrivals and Coxian
service time distribution is obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
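
(Editorial note: the closed form derived in the paper is not reproduced
here. As a sanity check, in the special case $k = 1$ the queue reduces to
M/M/1, whose queue-length distribution is the familiar geometric form
$P(N = n) = (1 - \rho)\rho^n$, $n \ge 0$, with $\rho = \lambda/\mu < 1$ and
mean queue length $\bar{n} = \rho/(1 - \rho)$.)
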
@Article{Anderson:1982:BMP,
author = "Gordon E. Anderson",
title = "{Bernoulli} methods for predicting communication
processor performance",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "25--29",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800201.807793",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a method for applying Bernoulli
trials to predict the number of communication lines a
communication processor can process without losing data
due to character overrun conditions. First, a simple
method for determining the number of lines which a
communication processor can support without possibility
of character overrun will be illustrated. Then, it will
be shown that communication processors can tolerate
occasional character overrun. Finally, using Bernoulli
trials, the probability of character overrun and the
mean time between character overrun will be calculated.
These last two figures are useful to system designers
in determining the number of lines which a
communication processor can reasonably support.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Bernoulli trials; Character overrun; Communication
processor; Markov process; Protocol; Thrashing",
}
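
(Editorial note, generic Bernoulli-trial arithmetic rather than the paper's
specific derivation: if each character-service interval is treated as an
independent trial in which an overrun occurs with probability $p$, then the
probability of surviving $n$ intervals without overrun is $(1 - p)^n$ and
the mean number of intervals between overruns is $1/p$; the number of lines
a processor can ``reasonably support'' is then the largest configuration
whose implied $p$ keeps the mean time between overruns above a chosen
target.)
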
@Article{Laurmaa:1982:AHT,
author = "Timo Laurmaa and Markku Syrj{\"a}nen",
title = "{APL} and {Halstead}'s theory: a measuring tool and
some experiments",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "32--47",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010675.807794",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We have designed and implemented an algorithm which
measures APL-programs in the sense of software science
by M. H. Halstead /1/. The reader is assumed to be
familiar with the theories of software science. Our
purpose has been to find the best possible algorithm to
automatically analyse large quantities of APL-programs.
We have also used our measuring tool to make some
experiments to find out if APL-programs and workspaces
obey the laws of software science or not. Because our
purpose was to analyse large quantities, i.e. hundreds
of programs, we have not implemented an algorithm which
gives exactly correct results from the software science
point of view, because this would necessitate manual
clues to the analysing algorithm and thus an
interactive mode of analysis. Instead, we have
strived for a tool which carries out the analysis
automatically and as correctly as possible. In the next
section some difficulties encountered in the design of
the measuring algorithm and some inherent limitations
of it are discussed. Section 3 summarises the sources
of errors in the analysis carried out by our algorithm,
while section 4 gives a more detailed description of
the way analysis is carried out. The remaining sections
of this paper report on some experiments we have
carried out using our measuring tool. The purpose of
these experiments has been to evaluate the explaining
power of Halstead's theory in connection of
APL-programs. However, no attempt has been made to
process the results of the experiments statistically.
The results of the experiments have been treated here
only when `obvious' (in)compatibilities between the
theory and the results have been observed. Possible
reasons for the (in)compatibilities are also pointed
out.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Beser:1982:FES,
author = "Nicholas Beser",
title = "Foundations and experiments in software science",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "48--72",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800201.807795",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of papers have appeared on the subject of
software science; claiming the existence of laws
relating the size of a program and the number of
operands and operators used. The pre-eminent theory was
developed by Halstead in 1972. The thesis work focuses
on the examination of Halstead's theory; with an
emphasis on his fundamental assumptions. In particular,
the length estimator was analyzed to determine why it
yields such a high variance; the theoretical
foundations of software science have been extended to
improve the applicability of the critical length
estimator. This elaboration of the basic theory will
result in guidelines for the creation of counting rules
applicable to specific classes of programs, so that it
is possible to determine both when and how software
science can be applied in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schnurer:1982:PAP,
author = "Karl Ernst Schnurer",
title = "{Product Assurance Program Analyzer} ({P.A.P.A.}) a
tool for program complexity evaluation",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "73--74",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010675.807796",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tool has been developed to assist in the software
validation process. P.A.P.A. will measure the
complexity of programs and detect several program
anomalies. The resulting list of analyzed programs is
sorted in order of descending complexity. Since high
complexity and error-proneness are strongly related,
the ``critical'' programs will be found earlier within
the development cycle. P.A.P.A. provides syntax
analyzers for RPG (II/III), PSEUDOCODE (design and
documentation language) and PL/SIII (without macro
language). It may be applied during the design, coding,
and test phases of software development (e.g. for
design and code inspections).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gross:1982:CME,
author = "David R. Gross and Mary A. King and Michael R. Murr
and Michael R. Eddy",
title = "Complexity measurement of {Electronic Switching System
(ESS)} software",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "75--85",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010675.807797",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We have been developing a tool that measures the
complexity of software: (1) to predict the quality of
software products and (2) to allocate proportionally
more testing resources to complex modules. The software
being measured is real-time and controls telephone
switching systems. This software system is large and
its development is distributed over a period of several
years, with each release providing enhancements and bug
fixes. We have developed a two-stage tool consisting of
a parser and an analyzer. The parser operates on the
source code and produces operator, operand, and
miscellaneous tables. These tables are then processed
by an analyzer program that calculates the complexity
measures. Changes for tuning our Halstead counting
rules involve simple changes to the analyzer only.
During the development there were problems and issues
to be confronted dealing with static analysis and code
metrics. These are also described in this paper. In
several systems we found that more than 80\% of
software failures can be traced to only 20\% of the
modules in the system. The McCabe complexity and some
of Halstead's metrics score higher than the count of
executable statements in their correlations with field
failures. It is reasonable to expect that we could
devote more effort to the review and test of
high-complexity modules and increase the quality of the
software product that we send to the field.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
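
(Editorial note on the McCabe complexity mentioned above: cyclomatic
complexity is defined on the control-flow graph as $v(G) = E - N + 2P$; for
a single-entry, single-exit routine it equals the number of binary decision
predicates plus one. The deliberately crude Python approximation below works
on a C-like token stream; the tokenizer and the keyword set are assumptions
of this sketch, not of the paper.)

    def cyclomatic_complexity(tokens):
        # Crude approximation: one plus the number of binary-decision tokens.
        # A real tool would build the control-flow graph and use v(G) = E - N + 2P.
        decisions = {"if", "while", "for", "case", "&&", "||", "?"}
        return 1 + sum(1 for t in tokens if t in decisions)

    # Example: a routine containing one "if" and one "while" has complexity 3.
    print(cyclomatic_complexity(["if", "(", "x", ")", "while", "(", "y", ")"]))
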
@Article{Hartman:1982:CTR,
author = "Sandra D. Hartman",
title = "A counting tool for {RPG}",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "86--100",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010675.807798",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Halstead and McCabe metrics were evaluated for
their usefulness in identifying RPG II and RPG III
modules likely to contain a high number of errors. For
this evaluation, commercially available RPG modules
written within IBM were measured and assigned to low,
medium, or high metric value ranges. Conclusions from
this evaluation and RPG counting rules that were
concomitantly developed were presented at SCORE82 and
are summarized in the following report.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Naib:1982:ASS,
author = "Farid A. Naib",
title = "An application of software science to the quantitative
measurement of code quality",
journal = j-SIGMETRICS,
volume = "11",
number = "3",
pages = "101--128",
month = "Fall",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010675.807799",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The error rate of a software application may function
as a measure of code quality. A methodology has been
developed which allows for the accurate prediction of
the error rate and hence code quality prior to an
application's release. Many factors were considered
which could conceivably be related to the error rate.
These factors were divided into two categories: those
factors which vary with time, and those factors which
do not vary with time. Factors which vary with time
were termed environmental factors and included such
items as: number of users, errors submitted to date,
etc. Factors which do not vary with time were termed
internal factors and included Halstead metrics, McCabe
metrics and lines of code.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blake:1982:OCT,
author = "Russ Blake",
title = "Optimal control of thrashing",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "1--10",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035295",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The method of discrete optimal control is applied to
control thrashing in a virtual memory. Certain
difficulties with several previous approaches are
discussed. The mechanism of optimal control is
presented as an effective, inexpensive alternative. A
simple, ideal policy is devised to illustrate the
method. A new feedback parameter, the thrashing level,
is found to be a positive and robust indicator of
thrashing. When applied to a real system, the idealized
policy effectively controlled the virtual memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Babaoglu:1982:HRD,
author = "{\"O}zalp Babao{\u{g}}lu",
title = "Hierarchical replacement decisions in hierarchical
stores",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "11--19",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035296",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the primary motivations for implementing
virtual memory is its ability to automatically manage a
hierarchy of storage systems with different
characteristics. The composite system behaves as if it
were a single-level system having the more desirable
characteristics of each of its constituent levels. In
this paper we extend the virtual memory concept to
within each of the levels of the hierarchy. Each level
is thought of as containing two additional levels
within it. This hierarchy is not a physical one, but
rather an artificial one arising from the employment of
two different replacement algorithms. Given two
replacement algorithms, one of which has good
performance but high implementation cost and the other
poor performance but low implementation cost, we
propose and analyze schemes that result in an overall
algorithm having the performance characteristics of the
former and the cost characteristics of the latter. We
discuss the suitability of such schemes in the
management of storage hierarchies that lack page
reference bits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hagmann:1982:PPR,
author = "Robert B. Hagmann and Robert S. Fabry",
title = "Program page reference patterns",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "20--29",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035298",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a set of measurements of the
memory reference patterns of some programs. The
technique used to obtain these measurements is
unusually efficient. The data is presented in graphical
form to allow the reader to `see' how the program uses
memory. Constant use of a page and sequential access of
memory are easily observed. An attempt is made to
classify the programs based on their referencing
behavior. From this analysis it is hoped that the
reader will gain some insights as to the effectiveness
of various memory management policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bunt:1982:EMP,
author = "R. B. Bunt and R. S. Harbus and S. J. Plumb",
title = "The effective management of paging storage
hierarchies",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "30--38",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035299",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of storage hierarchies in the implementation
of a paging system is investigated. Alternative
approaches for managing a paging storage hierarchy are
described and two are selected for further study ---
staging and migration. Characteristic behaviour is
determined for each of these approaches and a series of
simulation experiments is conducted (using program
reference strings as data) for the purpose of comparing
them. The results clearly show migration to be a
superior approach from the point of view of both cost
and performance. Conclusions are drawn on the
effectiveness of each approach in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hodges:1982:WCP,
author = "Larry F. Hodges and William J. Stewart",
title = "Workload characterization and performance evaluation
in a research environment",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "39--50",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035301",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the process of bench-marking the
diverse research environment that constitutes the
workload of VAX/VMS at the University Analysis and
Control Center at North Carolina State University. The
benchmarking process began with a study of the system
load and performance characteristics over the six-month
period from January to June of 1981. Statistics were
compiled on the number of active users, CPU usage by
individual accounts, and peak load periods. Individual
users were interviewed to determine the nature and
major computing characteristics of the research they
were conducting on VAX. Information from all sources
was compiled to produce a benchmark that closely
paralleled actual system activity.\par
An analytic model was introduced and used in
conjunction with the benchmark data and hardware
characteristics to derive performance measures for the
system. Comparisons with measured system performance
were conducted to demonstrate the accuracy of the
model. The model was then employed to predict
performance as the system workload was increased, to
suggest improvements for the system, and to examine the
effects of those improvements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haring:1982:SDW,
author = "G{\"u}nter Haring",
title = "On state-dependent workload characterization by
software resources",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "51--57",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035302",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A method for the characterization of computer workload
at the task level is presented. After having divided
the workload into different classes using a cluster
technique, each cluster is further analysed by state
dependent transition matrices. Thus it is possible to
derive the most probable task sequences in each
cluster. This information can be used to construct
synthetic scripts at the task level rather than the
usual description at the hardware resource level.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bolzoni:1982:PIS,
author = "M. L. Bolzoni and M. C. Calzarossa and P. Mapelli and
G. Serazzi",
title = "A package for the implementation of static workload
models",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "58--67",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035303",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The general principles for constructing workload
models are reviewed. The differences between static and
dynamic workload models are introduced and the
importance of the classification phase for the
implementation of both types of workload models is
pointed out. All the operations required for
constructing static workload models have been connected
in a package. Its main properties and fields of
application are presented. The results of an
experimental study performed with the package on a
batch and interactive workload show its ease of use and
the accuracy of the model obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McDaniel:1982:MSI,
author = "Gene McDaniel",
title = "The {Mesa Spy}: an interactive tool for performance
debugging",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "68--76",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035305",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Spy is a performance evaluation tool for the Mesa
environment that uses a new extension to the PC
sampling technique. The data collection process can use
information in the run time call stack to determine
what code is responsible for the resources being
consumed. The Spy avoids perturbing the user
environment when it executes, provides symbolic output
at the source-language level, and can be used without
recompiling the program to be examined. Depending upon
how much complication the user asks for during data
collection, the Spy steals between 0.3\% and 1.8\% of
the cycles of a fast machine, and between 1.08\% and
35.9\% of the cycles on a slow machine.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "high level language performance debugging; pc
sampling; performance analysis",
}
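
(Editorial note: the Spy itself is specific to the Mesa environment. Purely
to illustrate the PC-sampling idea described in the abstract, here is a
minimal, Unix-only Python sketch that samples the currently executing frame
whenever a CPU-time profiling timer fires; the function busy() and the 10 ms
interval are invented for the example.)

    import collections
    import signal

    samples = collections.Counter()

    def _sample(signum, frame):
        # Record which function was executing when the profiling timer fired.
        samples[frame.f_code.co_name] += 1

    signal.signal(signal.SIGPROF, _sample)
    signal.setitimer(signal.ITIMER_PROF, 0.01, 0.01)  # sample every 10 ms of CPU time

    def busy():
        s = 0
        for i in range(2000000):
            s += i * i
        return s

    busy()
    signal.setitimer(signal.ITIMER_PROF, 0)           # stop sampling
    print(samples.most_common())
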
@Article{Hercksen:1982:MSE,
author = "Uwe Hercksen and Rainer Klar and Wolfgang
Klein{\"o}der and Franz Knei{\ss}l",
title = "Measuring simultaneous events in a multiprocessor
system",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "77--88",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035306",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the hierarchically organized multiprocessor system
EGPA, which has the structure of a pyramid, the
performance of concurrent programs is studied. These
studies are assisted by a hardware monitor
(Z{\"A}HLMONITOR III), which measures not only the
activity and idle states of CPU and channels, but
records the complete history of processes in the CPU
and interleaved I/O activities. The applied method is
distinguished from usual hardware measurements for two
reasons: it merges the a priori independent event
streams coming from the different processors into a
single well-ordered event stream, and it records not
only hardware but also software events. Most useful
have been traces of software events, which give the
programmer insight into the dynamic cooperation of
distributed subtasks of his program. This paper
describes the measurement method and its application to
the analysis of the behaviour of a highly asynchronous
parallel algorithm: the projection of contour lines
from a given point of view and the elimination of
hidden lines.\par
This work is sponsored by the Bundesminister f{\"u}r
Forschung und Technologie (German Federal Minister of
Research and Technology).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:1982:SDF,
author = "Erol Gelenbe",
title = "Stationary deterministic flows in discrete systems:
{I}",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "89--101",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035308",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a deterministic system whose state space
is the $n$-dimensional first orthant. It may be
considered as a network of (deterministic) queues, a
Karp-Miller vector addition system, a Petri net, a
complex computer system, etc. Weak assumptions are then
made concerning the asymptotic or limiting behaviour of
the instants at which events are observed across a cut
in the system: these instants may be considered as
`arrival' or `departure' instants. Thus, as in
operational analysis, we deal with deterministic and
observable properties and we need no stochastic
assumptions or restrictions (such as independence,
identical distributions, etc.).\par
We consider however asymptotic or stationary
properties, as in conventional queueing analysis. Under
our assumptions a set of standard theorems is proved:
concerning arrival and departure instant measures,
concerning `birth and death' type equations, and
concerning Little's formula. Our intention is to set
the framework for a new approach to performance
modelling of computer systems in a context close to
that used in actual measurements, but taking into
account infinite time behaviour in order to take
advantage of the useful mathematical properties of
asymptotic results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
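
(Editorial note on the Little's formula referred to above, in its
operational form over an observation period: $\bar{n} = X \cdot R$, where
$\bar{n}$ is the time-averaged number in the system, $X$ the observed
throughput, and $R$ the mean residence time; this is the deterministic,
measurement-oriented counterpart of the stochastic statement
$L = \lambda W$.)
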
@Article{Baccelli:1982:DBR,
author = "F. Baccelli and E. G. Coffman",
title = "A data base replication analysis using an {M\slash
M\slash m} queue with service interruptions",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "102--107",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035309",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A study of file replication policies for distributed
data bases will be approached through the analysis of
an M/M/m queue subjected to state-independent,
preemptive interruptions of service. The durations of
periods of interruption constitute a sequence of
independent, identically distributed random variables.
Independently, the times measured from the termination
of one period of interruption to the beginning of the
next form a sequence of independent, exponentially
distributed random variables. Preempted customers
resume service at the terminations of interrupt
periods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Plateau:1982:MPR,
author = "Brigitte Plateau and Andreas Staphylopatis",
title = "Modelling of the parallel resolution of a numerical
problem on a locally distributed computing system",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "108--117",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035310",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern VLSI technology has enabled the development of
high-speed computing systems, based upon various
multiprocessor architectures [1]. We can distinguish
several types of such systems, depending on the control
policies adopted, the interprocessor communication
modes and the degree of resource-sharing. The
efficiency of parallel processing may be significant in
various areas of computer applications; in particular,
large numerical applications, such as the solution of
linear systems and differential equations, are marked
by the need for high computation speeds. So, the advance
of parallel processing systems goes together with
research effort in developing efficient parallel
algorithms [2]. The implementation of parallel
algorithms concerns the execution of concurrent
processes, assigned to the processors of the system,
which communicate with each other. The synchronization
needed at process interaction points implies the
existence of waiting delays, which constitute the main
limiting factor of parallel computation. Several
modelling techniques have been developed, that allow
the prediction and verification of parallel systems
performance. The two general approaches followed
concern deterministic models [3] and probabilistic
models. The latter, based on the theory of stochastic
processes [5] \ldots{} are well adapted to the analysis
of complex variable phenomena and provide important
measures concerning several aspects of parallel
processing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bard:1982:MSD,
author = "Yonathan Bard",
title = "Modeling {I/O} systems with dynamic path selection,
and general transmission networks",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "118--129",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035312",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper examines general transmission networks, of
which I/O subsystems are a special case. By using the
maximum entropy principle, we answer questions such as
what is the probability that a path to a given node is
free when that node is ready to transmit. Systems with
both dynamic and fixed path selection mechanisms are
treated. Approximate methods for large networks are
proposed, and numerical examples are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazowska:1982:MCM,
author = "Edward D. Lazowska and John Zahorjan",
title = "Multiple class memory constrained queueing networks",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "130--140",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035313",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most computer systems have a memory constraint: a
limit on the number of requests that can actively
compete for processing resources, imposed by finite
memory resources. This characteristic violates the
conditions required for queueing network performance
models to be separable, i.e., amenable to efficient
analysis by standard algorithms. Useful algorithms for
analyzing models of memory constrained systems have
been devised only for models with a single customer
class.\par
In this paper we consider the multiple class case. We
introduce and evaluate an algorithm for analyzing
multiple class queueing networks in which the classes
have independent memory constraints. We extend this
algorithm to situations in which several classes share
a memory constraint. We sketch a generalization to
situations in which a subsystem within an overall
system model has a population constraint.\par
Our algorithm is compatible with the extremely time-
and space-efficient iterative approximate solution
techniques for separable queueing networks. This level
of efficiency is mandatory for modelling large
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate solution technique; computer system
performance evaluation; memory constraint; population
constraint; queueing network model",
}
@Article{Brandwajn:1982:FAS,
author = "Alexandre Brandwajn",
title = "Fast approximate solution of multiprogramming models",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "141--149",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035314",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network models of computer systems with
multiprogramming constraints generally do not possess a
product-form solution in the sense of Jackson.
Therefore, one is usually led to consider approximation
techniques when dealing with such models. Equivalence
and decomposition is one way of approaching their
solution. With multiple job classes, the equivalent
network may be viewed as a set of interdependent
queues. In general, the state-dependence in this
equivalent network precludes a product-form solution,
and the size of its state space grows rapidly with the
number of classes and of jobs per class. This paper
presents two methods for approximate solution of the
equivalent state-dependent queueing network. The first
approach is a manifold application of equivalence and
decomposition. The second approach, less accurate than
the first one, is a fast-converging iteration whose
computational complexity grows near-linearly with the
number of job classes and jobs in a class. Numerical
examples illustrate the accuracy of the two methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate solutions; equivalence and decomposition;
multiprogramming; queueing network models; simultaneous
resource possession",
}
@Article{Agrawal:1982:ASM,
author = "Subhash C. Agrawal and Jeffrey P. Buzen",
title = "The aggregate server method for analyzing
serialization delays in computer systems",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "150--150",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035316",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The aggregate server method is an approximate,
iterative technique for analyzing the delays programs
encounter while waiting for entry into critical
sections, non-reentrant subroutines, and similar
software structures that cause processing to become
serialized. The method employs a conventional product
form queueing network comprised of servers that
represent actual I/O devices and processors, plus
additional aggregate servers that represent serialized
processing activity. The parameters of the product form
network are adjusted iteratively to account for
contention among serialized and non-serialized
customers at each physical device.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smith:1982:PAS,
author = "Connie U. Smith and David D. Loendorf",
title = "Performance analysis of software for an {MIMD}
computer",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "151--162",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035317",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a technique for modeling and
analyzing the performance of software for an MIMD
(Multiple Instruction Multiple Data) computer. The
models can be used as an alternative to experimentation
for the evaluation of various algorithms and different
degrees of parallelism. They can also be used to study
the tradeoffs involved in increasing the amount of
parallel computation at the expense of increased
overhead for synchronization and communication. The
detection and alleviation of performance bottlenecks is
facilitated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agre:1982:MRN,
author = "Jon R. Agre and Satish K. Tripathi",
title = "Modeling reentrant and nonreentrant software",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "163--178",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035318",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A description of software module models for computer
systems is presented. The software module models are
based on a two level description, the software level
and the hardware level, of the computer system. In the
software module level it is possible to model
performance effects of software traits such as
reentrant and nonreentrant type software modules. The
resulting queueing network models are, in general, not
of the product form class and approximation schemes are
employed as solution techniques.\par
An example of a software module model of a hypothetical
computer system is presented. The model is solved with
a simulation program and three approximation schemes.
The approximation results were compared with the
simulation results and some schemes are found to
produce good estimates of the effects of changing from
reentrant to non-reentrant software modules.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:1982:OME,
author = "L. T. Wu",
title = "Operational models for the evaluation of degradable
computing systems",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "179--185",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035319",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent advances in multiprocessor technology have
established the need for unified methods to evaluate
computing systems performance and reliability. In
response to this modeling need, this paper considers a
general modeling framework which permits the modeling,
analysis and evaluation of degradable computing
systems. Within this framework, a simple and useful
user-oriented performance variable is identified and
shown to be a proper generalization of the traditional
notions of system performance and reliability.\par
The modeling and evaluation methods considered in this
paper provide a relatively straightforward approach for
integrating reliability and availability measures with
performance measures. The hierarchical decomposition
approach permits the modeling and evaluation of a
computing system's subsystems (e.g., hardware,
software, peripherals, interfaces, user demand systems)
as a whole rather than the traditional methods of
evaluating these subsystems independently. Accordingly,
it becomes possible to evaluate the performance of the
system software and the reliability of the system
hardware simultaneously in order to measure the
effectiveness of the system design. Since the
performance variable introduced permits the
characterization of the system performance according to
the user's view of the systems, the results obtained
represent more accurate assessments of the system's
ability to perform than the existing performance or
reliability measures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marie:1982:ECA,
author = "Raymond A. Marie and Patricia M. Snyder and William J.
Stewart",
title = "Extensions and computational aspects of an iterative
method",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "186--194",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The so-called iterative methods are among a class of
methods that have recently been applied to obtain
approximate solutions to general queueing networks. In
this paper it is shown that if the network contains
feedback loops, then it is more advantageous to
incorporate these loops into the analysis of the
station itself rather than into the analysis of the
complement of the station. We show how this analysis
may be performed for a simple two-phase Coxian server.
Additionally, it is shown that the number of iterations
required to achieve a specified degree of accuracy may
be considerably reduced by using a continuous updating
procedure in which the computed throughputs are
incorporated as soon as they are available, rather than
at the end of an iteration. An efficient computational
scheme is presented to accompany this continuous
updating. Finally a number of examples are provided to
illustrate these features.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Neuse:1982:HHA,
author = "Doug Neuse and K. Mani Chandy",
title = "{HAM}: the heuristic aggregation method for solving
general closed queueing network models of computer
systems",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "195--212",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An approximate analytical method for estimating
performance statistics of general closed queueing
network models of computing systems is presented. These
networks may include queues with priority scheduling
disciplines and non-exponential servers and several
classes of jobs. The method is based on the aggregation
theorem (Norton's theorem) of Chandy, Herzog and Woo.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "aggregation theorem; analytical models;
approximations; computer system models; general closed
queueing networks; non-local-balance; non-product-form;
performance analysis; priority scheduling",
}
@Article{Eager:1982:PBH,
author = "D. L. Eager and K. C. Sevcik",
title = "Performance bound hierarchies for queueing networks",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "213--214",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035324",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In applications of queueing network models to computer
system performance prediction, the computational effort
required to obtain an exact equilibrium solution of a
model may not be justified by the accuracy actually
required. In these cases, there is a need for
approximation or bounding techniques that can provide
the necessary information at reduced cost. This paper
presents Performance Bound Hierarchies (PBHs) for
single class separable queueing networks consisting of
fixed rate and delay service centers. A PBH consists of
a hierarchy of upper (pessimistic) or lower
(optimistic) bounds on mean system residence time. (The
bounds can also be expressed as bounds on system
throughput or center utilizations.) Each successive
member requires more computational effort, and in the
limit, the bounds converge to the exact solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brumfield:1982:EAH,
author = "Jeffrey A. Brumfield and Peter J. Denning",
title = "Error analysis of homogeneous mean queue and response
time estimators",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "215--221",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035325",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Flow balance and homogeneity assumptions are needed to
derive operational counterparts of M/M/1 queue length
and response time formulas. This paper presents
relationships between the assumption errors and the
errors in the queue length and response time estimates.
A simpler set of assumption error measures is used to
derive bounds on the error in the response time
estimate. An empirical study compares actual errors
with their bounds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harbitter:1982:MTL,
author = "Alan Harbitter and Satish K. Tripathi",
title = "A model of transport level flow control",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "222--232",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035327",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A Markov Decision Process model is developed to
analyze buffer assignment at the transport level of the
ARPAnet protocol. The result of the analysis is a
method for obtaining an assignment policy which is
optimal with respect to a delay/throughput/overhead
reward function. The nature of the optimal policy is
investigated by varying parameters of the reward.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:1982:CPC,
author = "Erol Gelenbe and Isi Mitrani",
title = "Control policies in {CSMA} local area networks:
{Ethernet} controls",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "233--240",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035328",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An analysis of the random carrier sense multiple
access channel is presented in terms of the behaviour
of each participating station. A detailed model of the
station protocol, including the control policy used in
case of collisions, is used to derive the traffic and
throughput of each station. The channel traffic
characteristics are derived from this model and used,
in turn, to derive the traffic parameters entering into
the station model. This provides a solution method for
complete system characteristics for a finite
prespecified set of stations. The approach is then used
to analyse control policies of the type used in
ETHERNET. We show, in particular, that as the
propagation delay becomes small, the specific form of
the control policy tends to have a marginal effect on
network performance. The approach also applies to the
DANUBE and XANTHOS networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tripathi:1982:ATF,
author = "Satish K. Tripathi and Alan Harbitter",
title = "An analysis of two flow control techniques",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "241--249",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035329",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queuing models can be useful tools in comparing the
performance characteristics of different flow control
techniques. In this paper the window control mechanism,
incorporated in protocols such as X.25, is compared to
the ARPAnet buffer reservation scheme. Multiclass
queuing models are used to examine message throughput
and delay characteristics. The analysis highlights the
interaction of long and short message (in terms of
length in packets) transmitters under the two flow
control techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{King:1982:MCR,
author = "P. J. B. King and I. Mitrani",
title = "Modelling the {Cambridge Ring}",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "250--258",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035330",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Models for the local area computer network known as
the Cambridge Ring are developed and evaluated. Two
different levels of protocol are considered: the
hardware and the Basic Block. These require different
approaches and, in the second case, an approximate
solution method. A limited comparison between the
Cambridge Ring and another ring architecture --- the
token ring --- is carried out.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marrevee:1982:PRT,
author = "J. Marrevee",
title = "The power of the read track and the need for a write
track command for disk back-up and restore utilities",
journal = j-SIGMETRICS,
volume = "12",
number = "1",
pages = "10--14",
month = dec,
year = "1982/1983",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Perros:1982:MPR,
author = "H. G. Perros",
title = "A model for predicting the response time of an on-line
system for electronic fund transfer",
journal = j-SIGMETRICS,
volume = "12",
number = "1",
pages = "15--21",
month = "Winter",
year = "1982/1983",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Augustin:1982:CCD,
author = "Reinhard Augustin and Klaus-J{\"u}rgen B{\"u}scher",
title = "Characteristics of the {COX}-distribution",
journal = j-SIGMETRICS,
volume = "12",
number = "1",
pages = "22--32",
month = dec,
year = "1982/1983",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perros:1984:QNB,
author = "H. G. Perros",
title = "Queueing networks with blocking: a bibliography",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "8--12",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, queueing networks with blocking have
been studied by researchers from various research
communities such as Computer Performance Modelling,
Operations Research, and Industrial Engineering. In
view of this, related results are scattered throughout
various journals. The bibliography given below is the
result of a first attempt to compile an exhaustive list
of related papers in which analytic investigations
(exact or approximate) or numerical investigations of
queueing networks with blocking have been reported.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DeMarco:1984:ASS,
author = "Tom DeMarco",
title = "An algorithm for sizing software products",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "13--22",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041825",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports on efforts to develop a cost
forecasting scheme based on a Function Metric called
System BANG. A Function Metric is a quantifiable
indication of system size and complexity derived
directly from a formal statement of system requirement.
Conclusions from a small sample of projects are
presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fishwick:1984:PPG,
author = "Paul A. Fishwick and Stefan Feyock",
title = "{PROFGEN}: a procedure for generating machine
independent high-level language profilers",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "27--31",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041826",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many of the tools used in software metrics for
evaluating the execution characteristics of a program
are predicated on specific counting rules for operators
and operands [1, 2]. The analyst may use these counting
techniques to determine such program attributes as
estimation of object code size prior to actual
compilation and the relative efficiencies of various
language compilers. Operator/operand measures provide
useful results for certain analyses, but a deficiency
exists in that the data derived from this technique
does not directly reflect the program structure
afforded by a high-level language such as FORTRAN,
Pascal, or Ada. There are many instances where it is
desirable to measure the program at the source level
where the execution data may be directly associated
with specific high level program units such as source
statements and blocks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajaraman:1984:PML,
author = "M. K. Rajaraman",
title = "Performance measures for a local network",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "34--37",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041827",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parameters that influence the performance of a local
network consisting of three mainframes and an array
processor are identified. Performance measures are
developed for this network and their significance in
the operation and use of the network is discussed.
Some aspects of implementing such measures in a local
network are examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jones:1984:PEJ,
author = "Greg A. Jones",
title = "Performance evaluation of a job scheduler",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "38--43",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "International Business Machines' (IBM) Job Entry
Subsystem 3 (JES 3) is the integral part of the MVS
operating system that is responsible for controlling
all jobs from their entry into the system until their
exit out of the system. JES 3 maintains total awareness
of each job while it is in the system and services the
jobs upon request. These services include: preparing
the job for execution, selecting the job for execution,
and the processing of SYSIN/SYSOUT data. This paper
reports the findings of the performance evaluation
study of JES 3 through the use of a General Purpose
Simulation System (GPSS) model of JES 3 and exhibits
the benefits of using simulation models to study
complex systems such as JES 3. Once the model was
developed, it was used to evaluate the effects of
varying the job scheduler parameters of JES 3 in the
batch job environment. The input workload and service
times for the model were derived from System Management
Facilities (SMF) and Resource Management Facilities
(RMF) data from the modeled system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clark:1984:NCP,
author = "Jon D. Clark and Thomas C. Richards",
title = "A note on the cost-performance ratios of {IBM}'s
{43XX} series",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "44--45",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041829",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Pricing policies of computers with various performance
capabilities are usually assumed to be non-linear due
to economies-of-scale. This article analyzes the
cost-performance ratios of a single IBM product line,
the 43XX series, and finds this performance
characteristic to be surprisingly linear, but with a
great deal of individual variation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer; cost-performance; performance evaluation",
}
@Article{Coffman:1984:RPP,
author = "E. G. {Coffman, Jr.}",
title = "Recent progress in the performance evaluation of
fundamental allocation algorithms",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "2--6",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809308",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our understanding of several allocation algorithms
basic to operating systems and to data base systems has
improved substantially as a result of a number of
research efforts within the past one or two years. The
results have stirred considerable excitement in both
theorists and practitioners. This is not only because
of the inroads made into long-standing problems, but
also because of the surprising nature of the results;
in particular, we refer to proofs that certain
classical algorithms described as approximate are in
fact optimal in a strong probabilistic sense. The work
discussed here will be classified according to the
application areas, archival and dynamic storage
allocation. In both cases we are concerned with the
packing problems that arise in making efficient use of
storage. Equivalents of the archival problems also have
importance in scheduling applications [4]; however, we
shall focus exclusively on the storage allocation
setting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferrari:1984:FAW,
author = "Domenico Ferrari",
title = "On the foundations of artificial workload design",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "8--14",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809309",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The principles on which artificial workload model
design is currently based are reviewed. Design methods
are found wanting for three main reasons: their
resource orientation, with the selection of resources
often unrelated to the performance impact of resource
demands; their failure to define an accuracy criterion
for the resulting workload model; and their ignoring
the dynamics of the workload to be modeled. An attempt
at establishing conceptual foundations for the design
of interactive artificial workloads is described. The
problems found in current design methods are taken into
account, and sufficient conditions for the
applicability of these methods are determined. The
study also provides guidance for some of the decisions
to be made in workload model design using one of the
current methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perez-Davila:1984:PIF,
author = "Alfredo de J. Perez-Davila and Lawrence W. Dowdy",
title = "Parameter interdependencies of file placement models
in a {Unix} system",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "15--26",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809310",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A file assignment case study of a computer system
running Unix is presented. A queueing network model of
the system is constructed and validated. A modeling
technique for the movement of files between and within
disks is proposed. A detailed queueing network model is
constructed for several file distributions in secondary
storage. The interdependencies between the speed of the
CPU, the swapping activity, the visit ratios and the
multiprogramming level are examined and included in the
modeling technique. The models predict the performance
of several possible file assignments. The various file
assignments are implemented and comparisons between the
predicted and actual performance are made. The models
are shown to accurately predict user response time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bunt:1984:MPL,
author = "Richard B. Bunt and Jennifer M. Murphy and Shikharesh
Majumdar",
title = "A measure of program locality and its application",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "28--40",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809311",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although the phenomenon of locality has long been
recognized as the single most important characteristic
of program behaviour, relatively little work has been
done in attempting to measure it. Recent work has led
to the development of an intrinsic measure of program
locality based on the Bradford--Zipf distribution.
Potential applications for such a measure are many, and
include the evaluation of program restructuring methods
(manual and automatic), the prediction of system
performance, the validation of program behaviour
models, and the enhanced understanding of the phenomena
that characterize program behaviour. A consideration of
each of these areas is given in connection with the
proposed measure, both to increase confidence in the
validity of the measure and to illustrate a methodology
for dealing with such problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krzesinski:1984:ILM,
author = "A. Krzesinski and J. Greyling",
title = "Improved lineariser methods for queueing networks with
queue dependent centres",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "41--51",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809312",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Lineariser is an MVA-based technique developed for
the approximate solution of large multiclass product
form queueing networks. The Lineariser is capable of
computing accurate solutions for networks of fixed rate
centres. However, problems arise when the Lineariser is
applied to networks containing centres with queue
dependent service rates. Thus networks exist which seem
well suited (a large number of lightly loaded centres,
large numbers of customers in each closed chain) for
Lineariser solution but whose queue dependent centres
cannot be solved accurately by the Lineariser method.
Examples have also been found where the Lineariser
computes accurate values for the queue lengths, waiting
times and throughputs though the values computed for
the queue length distributions are totally in error.
This paper presents an Improved Lineariser which
computes accurate approximate solutions for multiclass
networks containing an arbitrary number of queue
dependent centres. The Improved Lineariser is based on
MVA results and is therefore simple to implement and
numerically well behaved. The Improved Lineariser has
storage and computation requirements of order $ M N $
locations and $ M N J^2 $ arithmetic operations where $M$ is
the number of centres, $N$ the total number of
customers and $J$ the number of closed chains. Results
from 130 randomly generated test networks are used to
compare the accuracy of the standard and Improved
Linearisers. The Improved Lineariser is consistently
more accurate (tolerance errors on all performance
measures less than 2 per cent) than the standard
Lineariser and its accuracy is insensitive to the size
of the network model. In addition, the Improved
Lineariser computes accurate solutions for networks
which cause the standard Lineariser to fail.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Approximate solutions; Error analysis; Mean value
analysis; Multiclass queueing networks; Product form
solutions",
}
@Article{Zahorjan:1984:ILD,
author = "John Zahorjan and Edward D. Lazowska",
title = "Incorporating load dependent servers in approximate
mean value analysis",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "52--62",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/800264.809313",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network performance modelling technology has
made tremendous strides in recent years. Two of the
most important developments in facilitating the
modelling of large and complex systems are hierarchical
modelling, in which a single load dependent server is
used as a surrogate for a subsystem, and approximate
mean value analysis, in which reliable approximate
solutions of separable models are efficiently obtained.
Unfortunately, there has been no successful marriage of
these two developments; that is, existing algorithms
for approximate mean value analysis do not accommodate
load dependent servers reliably.\par
This paper presents a successful technique for
incorporating load dependent servers in approximate
mean value analysis. We consider multiple class models
in which the service rate of each load dependent server
is a function of the queue length at that server. In
other words, load dependent center $k$ delivers
``service units'' at a total rate of $ f_k(n_k)$ when $
n_k$ customers are present. We present extensive
experimental validation which indicates that our
algorithm contributes an average error in response
times of less than 1\% compared to the (much more
expensive) exact solution.\par
In addition to the practical value of our algorithm,
several of the techniques that it employs are of
independent interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agrawal:1984:RTP,
author = "Subhash C. Agrawal and Jeffrey P. Buzen and Annie W.
Shum",
title = "{Response Time Preservation}: a general technique for
developing approximate algorithms for queueing
networks",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "63--77",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809314",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Response Time Preservation (RTP) is introduced as a
general technique for developing approximate analysis
procedures for queueing networks. The underlying idea
is to replace a subsystem by an equivalent server whose
response time in isolation equals that of the entire
subsystem in isolation. The RTP based approximations,
which belong to the class of decomposition
approximations, can be viewed as a dual of the Norton's
Theorem approach for solving queueing networks since it
matches response times rather than throughputs. The
generality of the RTP technique is illustrated by
developing solution procedures for several important
queueing systems which violate product form
assumptions. Examples include FCFS servers with general
service times, FCFS servers with different service
times for multiple classes, priority scheduling, and
distributed systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mussi:1984:EPE,
author = "Ph. Mussi and Ph. Nain",
title = "Evaluation of parallel execution of program tree
structures",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "78--87",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809315",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We define and evaluate two policies (NA-policy,
A-policy) for parallel execution of program tree
structures. Via a probabilistic model we analytically
determine, for each policy, the Laplace--Stieltjes
transform for the tree processing time distribution.
The acceleration of the program execution time achieved
when adding processors to a single processor
environment is computed and plotted for each policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sanguinetti:1984:POP,
author = "John Sanguinetti",
title = "Program optimization for a pipelined machine a case
study",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "88--95",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/800264.809316",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Amdahl 580 processor is a pipelined processor
whose performance can be affected by characteristics of
the instructions it executes. This paper describes
certain optimizations made to a set of system software
routines during their development. The optimization
effort was driven by the execution frequencies of
common paths through the programs in question, and by
the execution characteristics of those paths, as shown
by a processor simulator. Path optimization itself was
done with both general program optimization techniques
and with techniques specific to the particular
characteristics of the 580's pipeline. Overall, the
average execution time for these routines was reduced
by over 50\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Turner:1984:PDB,
author = "Rollins Turner and Jeffrey Schriesheim and Indrajit
Mitra",
title = "Performance of a {DECnet} based disk block server",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "96--104",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809317",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This report describes an experimental disk block
server implemented for the RSX-11M Operating System
using DECnet. The block server allows user programs on
one system to access files on a disk physically located
on a different system. The actual interface is at the
level of physical blocks and IO transfers. Results of
basic performance measurements are given, and explained
in terms of major components. Performance predictions
are made for servers of this type supporting more
complex workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stavenow:1984:TDC,
author = "Bengt Stavenow",
title = "Throughput-delay characteristics and stability
considerations of the access channel in a mobile
telephone system",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "105--112",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809318",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper a performance study of the access
channel in a cellular mobile telephone system /1/ is
presented. The method used in the Cellular System for
multiplexing the population of mobile terminals over
the access channel is a hybrid between the methods
known as CSMA/CD and BTMA. In the paper we extend an
analysis of CSMA/CD to accommodate the function of the
particular random multiaccess protocol. Results are
shown which illustrate the equilibrium channel
performance and the approximate
stability-throughput-delay tradeoff. Finally an
estimate of the average message delay is given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williams:1984:PQD,
author = "Elizabeth Williams",
title = "Processor queueing disciplines in distributed
systems",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "113--119",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809319",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A distributed program consists of processes, many of
which can execute concurrently on different processors
in a distributed system of processors. When several
processes from the same or different distributed
programs have been assigned to a processor in a
distributed system, the processor must select the next
process to run. The following two questions are
investigated: What is an appropriate method for
selecting the next process to run? Under what
conditions are substantial gains in performance
achieved by an appropriate method of selection?
Standard processor queueing disciplines, such as
first-come-first-serve and round-robin-fixed-quantum,
are studied. The results for four classes of queueing
disciplines tested on three problems are presented.
These problems were run on a testbed, consisting of a
compiler and simulator used to run distributed programs
on user-specified architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stephens:1984:CBH,
author = "Lindsey E. Stephens and Lawrence W. Dowdy",
title = "Convolutional bound hierarchies",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "120--133",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809320",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The time required to find the exact solution of a
product-form queueing network model of a computer
system can be high. Faster and cheaper methods of
solution, such as approximations, are natural
alternatives. However, the errors incurred when using
an approximation technique should be bounded. Several
recent techniques have been developed which provide
solution bounds. These bounding techniques have the
added benefit that the bounds can be made tighter if
extra computational effort is expended. Thus, a smooth
tradeoff of cost and accuracy is available. These
techniques are based upon mean value analysis. In this
paper a new bounding technique based upon the
convolution algorithm is presented. It provides a
continuous range of cost versus accuracy tradeoffs for
both upper and lower bounds. The bounds produced by the
technique converge to the exact solution as the
computational effort approaches that of convolution.
Also, the technique may be used to improve any existing
set of bounds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Suri:1984:NBB,
author = "Rajan Suri and Gregory W. Diehl",
title = "A new `building block' for performance evaluation of
queueing networks with finite buffers",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "134--142",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new `building block', for analyzing
queueing networks. This is a model of a server with a
variable buffer-size. Such a model enables efficient
analysis of certain queueing networks with blocking due
to limited buffer spaces, since it uses only
product-form submodels. The technique is extensively
tested, and found to be reasonably accurate over a wide
range of parameters. Several examples are given,
illustrating practical situations for which our model
would prove to be a useful performance analysis tool,
especially since it is simple to understand, and easy to
implement using standard software for closed queueing
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Approximate analysis; Blocking; Performance modelling;
Performance prediction; Product form networks; Queueing
networks",
}
@Article{Lavenberg:1984:SAE,
author = "Stephen S. Lavenberg",
title = "A simple analysis of exclusive and shared lock
contention in a database system",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "143--148",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a probabilistic model of locking in a
database system in which an arriving transaction is
blocked and lost when its lock requests conflict with
the locks held by currently executing transactions.
Both exclusive and shared locks are considered. We
derive a simple asymptotic expression for the
probability of blocking which is exact to order $ 1 / N
$ where $N$ is the number of lockable items in the
database. This expression reduces to one recently
derived by Mitra and Weinberger for the special case
where all locks are exclusive.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Becker:1984:MMS,
author = "S. T. Becker and K. M. Rege and B. Sengupta",
title = "A modeling methodology for sizing a computer based
system in a netted environment",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "149--157",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a hybrid model, combining both
analytical and simulation techniques, which was
developed to study the performance of a netted computer
based system. The computer based system that was
modeled is the Facility Assignment and Control System
(FACS). This system is presently being deployed within
several Bell Operating Companies to inventory and
assign central office and outside plant facilities. A
key feature of the model is its ability to characterize
the dynamic nature of FACS. An understanding of this
dynamic nature is necessary in establishing important
operational guidelines such as allowable CPU
utilization, levels of multiprogramming and priority of
transaction processing. In addition, the model allows
the user to investigate the sensitivity of the system
to a wide range of conditions. Typical study items
could include the effect of various load scenarios,
ability of the system to meet performance objectives,
and different hardware configurations. As part of this
paper, both the practical aspects of modeling a netted
computer based system and the theoretical development
of the hybrid model are considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peachey:1984:EIS,
author = "Darwyn R. Peachey and Richard B. Bunt and Carey L.
Williamson and Tim B. Brecht",
title = "An experimental investigation of scheduling strategies
for {UNIX}",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "158--166",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809324",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The scheduler used in an operating system is an
important factor in the performance of the system under
heavy load. This paper describes the scheduling
philosophy employed in the UNIX operating system and
outlines the standard scheduling strategies. Modified
strategies which address deficiencies in the standard
strategies are described. The effectiveness of these
modified strategies is assessed by means of performance
experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasce:1984:PEI,
author = "Daniel A. Menasc{\'e} and Leonardo Lellis P. Leite",
title = "Performance evaluation of isolated and interconnected
token bus local area networks",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "167--175",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809325",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The token bus based local area network, REDPUC,
designed and implemented at the Pont{\'\i}ficia
Universidade Cat{\'o}lica do Rio de Janeiro is briefly
described. Analytic models are presented, which allow
one to obtain an approximation for the average packet
delay, as well as exact upper and lower bounds for the
same performance measure. A performance evaluation of
interconnected local networks is also given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agrawal:1984:UAS,
author = "Subhash C. Agrawal and Jeffrey P. Buzen and Ashok K.
Thareja",
title = "A Unified Approach to Scan Time Analysis of Token
Rings and Polling Networks",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "176--185",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809326",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Token rings and multipoint polled lines are two widely
used network interconnection techniques. The general
concept of cyclic allocation processes is defined and
used to characterize token passing and polling in these
networks. Scan time, the time to poll all nodes at
least once, is an important quantity in the response
time analysis of such networks. We derive expressions
for the mean and variance of scan times using a direct,
operational approach. Resulting expressions are general
and are applicable to both exhaustive and
non-exhaustive service. The effect of higher level
protocols is easily incorporated in the analysis via
calculations of constituent quantities. The expression
for mean scan time is exact and depends only on the
means of message transmission times and arrival rates.
The approximate analysis of variance takes into account
the correlation between message transmissions at
different nodes. Expected level of accuracy is
indicated by an example.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandwajn:1984:EAM,
author = "Alexandre Brandwajn and William M. McCormack",
title = "Efficient approximation for models of multiprogramming
with shared domains",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "186--194",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809327",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network models of multiprogramming systems
with memory constraints and multiple classes of jobs
are important in representing large commercial computer
systems. Typically, an exact analytical solution of
such models is unavailable, and, given the size of
their state space, the solution of models of this type
is approached through simulation and/or approximation
techniques. Recently, a computationally efficient
iterative technique has been proposed by Brandwajn,
Lazowska and Zahorjan for models of systems in which
each job is subject to a separate memory constraint,
i.e., has its own memory domain. In some important
applications, it is not unusual, however, to have
several jobs of different classes share a single memory
``domain'' (e.g., IBM's Information Management System).
We present a simple approximate solution to the shared
domain problem. The approach is inspired by the
recently proposed technique which is complemented by a
few approximations to preserve the conceptual
simplicity and computational efficiency of this
technique. The accuracy of the results is generally in
fair agreement with simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bondi:1984:RTP,
author = "Andr{\'e} B. Bondi and Jeffrey P. Buzen",
title = "The response times of priority classes under
preemptive resume in {M/G/m} queues",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "195--201",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/800264.809328",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Approximations are given for the mean response times
of each priority level in a multiple-class multiserver
M/G/m queue operating under preemptive resume
scheduling. The results have been tested against
simulations of systems with two and three priority
classes and different numbers of servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:1984:AQN,
author = "Alexander Thomasian and Paul Bay",
title = "Analysis of {Queueing Network Models} with population
size constraints and delayed blocked customers",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "202--216",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809329",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing Network Models --- QNM's with population size
constraints and delayed blocked customers occur due to
MultiProgramming Level --- MPL constraints in computer
systems and window flow-control mechanisms in Computer
Communication Networks --- CCN's. The computational
cost of existing algorithms is unacceptable for large
numbers of chains and high population sizes. A fast
approximate solution technique based on load
concealment is presented to solve such QNM's. The
solution procedure is non-iterative in the case of
fixed rate Poisson arrivals, while iteration is
required in the case of quasi-random arrivals. Each
iteration requires the solution of a single chain
network of queues comprised of stations visited by each
chain. We then present an algorithm to detect saturated
chains and determine their maximum throughput. A fast
solution algorithm due to Reiser for closed chains is
also extended to the case of quasi-random arrivals. The
accuracy of the proposed solution techniques is
compared to previous techniques by applying it to a
test case, reported in the literature, and a set of
randomly generated examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaffney:1984:IEP,
author = "John E. Gaffney",
title = "Instruction entropy, a possible measure of
program\slash architecture compatibility",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "13--18",
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Sauer:1984:NSS,
author = "Charles H. Sauer",
title = "Numerical solution of some multiple chain queueing
networks",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "19--28",
month = dec,
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:1984:SCS,
author = "Alexander Thomasian and Kameshwar Gargeya",
title = "Speeding up computer system simulations using
hierarchical modeling",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "34--39",
month = dec,
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
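
The abstract above centres on a program's working set. For orientation only,
the sketch below computes the textbook (Denning-style) working-set size, the
number of distinct pages referenced in the last tau references of a
page-reference string; the paper's empirically derived characteristic
working-set functions and its hybrid simulation model are not reproduced, and
the reference string is made up.

# Working-set size |W(t, tau)|: distinct pages touched in the last tau
# references. Textbook definition only; illustrative data.
def working_set_sizes(reference_string, tau):
    sizes = []
    for t in range(len(reference_string)):
        window = reference_string[max(0, t - tau + 1): t + 1]
        sizes.append(len(set(window)))
    return sizes

refs = [1, 2, 1, 3, 2, 2, 4, 1, 3, 3]      # hypothetical page references
print(working_set_sizes(refs, tau=4))      # [1, 2, 2, 3, 3, 3, 3, 3, 4, 3]
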
@Article{Elshoff:1984:PMP,
author = "James L. Elshoff",
title = "The {PEEK} measurement program",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "40--53",
month = "Winter",
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses the problems encountered and
techniques used in conducting the performance
evaluation of a multi-processor on-line manpower data
collection system. The two main problems were: (1) a
total lack of available software tools, and (2) many
commonly used hardware monitor measures (e.g., CPU
busy, disk seek in progress) were either meaningless or
not available. The main technique used to circumvent
these problems was detailed analysis of one-word
resolution memory maps. Some additional data collection
techniques were (1) time-stamped channel measurements
used to derive some system component utilization
characteristics and (2) manual stopwatch timings used
to identify the system's terminal response times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1984:STM,
author = "Anna H{\'a}c",
title = "A survey of techniques for the modeling of
serialization delays in computer systems",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "54--56",
month = dec,
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The current status of an implementation of a
methodology relating load, capacity and service for IBM
MVS computer systems is presented. This methodology
encompasses systems whose workloads include batch, time
sharing and transaction processing. The implementation
includes workload classification, mix representation
and analysis, automatic benchmarking, and exhaust point
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mosleh:1985:BPR,
author = "Ali Mosleh and E. Richard Hilton and Peter S. Browne",
title = "{Bayesian} probabilistic risk analysis",
journal = j-SIGMETRICS,
volume = "13",
number = "1",
pages = "5--12",
month = jun,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041838.1041839",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As modern business and financial institutions have
come to rely more and more on large scale computers for
management support, the magnitude of the risks and
their potential consequences has increased
correspondingly. In addition, large systems involving
multiprocessing, resource sharing, and distributed
processing have given rise to a new generation of risks
due to the increased vulnerabilities of such large
scale systems and the potential for fraudulent or
malicious misuse of their resources. Somehow, these
risks must be managed since either deliberate or
accidental impairment of these large scale systems can
have serious consequences for the business. That is,
threats must be identified, and the likelihood of their
occurrences and the elements of the system vulnerable
to each of these threats must be established. Any
program for risk management must begin with a risk
analysis to compare the vulnerabilities in order to
pinpoint and rank the system's weaknesses and to
provide a guide for the cost-effective, systematic
reduction of the probability of the system's being
subverted or otherwise impaired.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gong:1985:CMB,
author = "Huisheng Gong and Monika Schmidt",
title = "A complexity measure based on selection and nesting",
journal = j-SIGMETRICS,
volume = "13",
number = "1",
pages = "14--19",
month = jun,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041838.1041840",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many concepts concerning the quantification of program
complexity have been developed during the last few
years. One of the most accepted and easy-to-apply
complexity measures, McCabe's cyclomatic number, has
been discussed and improved in several studies. The
cyclomatic number only considers the decision structure
of a program. Therefore, this paper proposes a new
method for calculating program complexity based on the concept
of postdomination, which takes into account the degree
of nesting of a program. Combining this method with the
cyclomatic number, a new complexity measure is
defined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cyclomatic number; degree of nesting; forward
dominance; program complexity",
}
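
The abstract above builds on McCabe's cyclomatic number. As a reference point,
the sketch below computes the classical V(G) = E - N + 2P from a control-flow
graph; the paper's combined measure, which additionally weights nesting via
postdomination, is not reproduced here.

# McCabe's cyclomatic number for a control-flow graph: V(G) = E - N + 2P.
def cyclomatic_number(edges, nodes, connected_components=1):
    return len(edges) - len(nodes) + 2 * connected_components

# Toy control-flow graph of a function containing one if/else:
nodes = ["entry", "test", "then", "else", "exit"]
edges = [("entry", "test"), ("test", "then"), ("test", "else"),
         ("then", "exit"), ("else", "exit")]
print(cyclomatic_number(edges, nodes))   # 5 - 5 + 2 = 2 (one decision)
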
@Article{Knudson:1985:PMS,
author = "Michael E. Knudson",
title = "A performance measurement and system evaluation
project plan proposal",
journal = j-SIGMETRICS,
volume = "13",
number = "1",
pages = "20--31",
month = jun,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041838.1041841",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This document is an outline for a performance
measurement and evaluation effort. Performance
measurements consist of producing data showing
frequency and execution times for components of
computer systems. Components include: (1) hardware, (2)
microcode, (3) macrocode, (4) applications software, and
(5) systems (e.g., utilities in an operating-system
environment). Evaluation can be broken down into
several areas. Principal areas of interest are
comparative performance evaluation and an analysis of a
system's structure/behavior. Comparative evaluation
consists of: relative performance measurements of
different machines; a summary of collected data; and an
analysis of a system's structure, including the
production of data describing the interrelationship of
system components. This data may be narrative, but the
preferred technique is a graphical presentation showing
component relationships.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ejiogu:1985:SMS,
author = "Lem O. Ejiogu",
title = "A simple measure of software complexity",
journal = j-SIGMETRICS,
volume = "13",
number = "1",
pages = "33--47",
month = jun,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041838.1041842",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Every science must have its own method of
investigation built on a sound foundation that is
empirical, justifiable and verifiable. Software
metrics, too, can benefit from this principle. A
complex aggregate of tools, ideas, methodologies,
programming languages, and varieties of applications go
into the development, design, manufacture and
maintenance of software. The combinations impose
another level of complexity on software.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eager:1985:CRI,
author = "Derek L. Eager and Edward D. Lazowska and John
Zahorjan",
title = "A comparison of receiver-initiated and
sender-initiated adaptive load sharing (extended
abstract)",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "1--3",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317802",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One goal of locally distributed systems is to
facilitate resource sharing. Most current locally
distributed systems, however, share primarily data,
data storage devices, and output devices; there is
little sharing of computational resources. Load sharing
is the process of sharing computational resources by
transparently distributing the system workload. System
performance can be improved by transferring work from
nodes that are heavily loaded to nodes that are lightly
loaded. Load sharing policies may be either static or
adaptive. Static policies use only information about
the average behavior of the system; transfer decisions
are independent of the actual current system state.
Static policies may be either deterministic (e.g.,
``transfer all compilations originating at node $A$ to
server $B$'') or probabilistic (e.g., ``transfer half
of the compilations originating at node $A$ to server
$B$, and process the other half locally''). Numerous
static load sharing policies have been proposed. Early
studies considered deterministic rules [Stone 1977,
1978; Bokhari 1979]. More recently, Tantawi and Towsley
[1985] have developed a technique to find optimal
probabilistic rules. The principal advantage of static
policies is their simplicity: there is no need to
maintain and process system state information. Adaptive
policies, by contrast, are more complex, since they
employ information on the current system state in
making transfer decisions. This information makes
possible significantly greater performance benefits
than can be achieved under static policies. This
potential was clearly indicated by Livny and Melman
[1982], who showed that in a network of homogeneous,
autonomous nodes there is a high probability that at
least one node is idle while tasks are queued at some
other node, over a wide range of network sizes and
average node utilizations. In previous work [Eager,
Lazowska \& Zahorjan 1984] we considered the
appropriate level of complexity for adaptive load
sharing policies. (For example, how much system state
information should be collected, and how should it be
used in making transfer decisions?) Rather than
advocating specific policies, we considered fairly
abstract strategies exhibiting various levels of
complexity. We demonstrated that the potential of
adaptive load sharing can in fact be realized by quite
simple strategies that use only small amounts of
system state information. This result is important
because of a number of practical concerns regarding
complex policies: the effect of the overhead required
to administer a complex policy, the effect of the
inevitable inaccuracies in detailed information about
system state and workload characteristics, and the
potential for instability. (We consciously use the
phrase ``load sharing'' rather than the more common
``load balancing'' to highlight the fact that load
balancing, with its implication of attempting to
equalize queue lengths system-wide, is not an
appropriate objective.) Adaptive load sharing policies
can employ either centralized or distributed control.
Distributed control strategies can be of two basic
types (although intermediate strategies also are
conceivable): sender-initiated (in which congested
nodes search for lightly loaded nodes to which work may
be transferred), and receiver-initiated (in which
lightly loaded nodes search for congested nodes from
which work may be transferred). Our earlier paper
considered distributed, sender-initiated policies --- a
sufficiently rich class to allow us to answer the
fundamental questions of policy complexity that we were
addressing. In the course of understanding the reasons
for the degradation of these policies at high system
loads, we were led to consider receiver-initiated
policies as a possible alternative. The comparison of
receiver-initiated and sender-initiated adaptive load
sharing is the purpose of the present paper. There have
been several experimental studies, using prototypes and
simulation models, of specific (typically fairly
complex) adaptive load sharing policies [Bryant \&
Finkel 1981; Livny \& Melman 1982; Kreuger \& Finkel
1984; Barak \& Shiloh 1984]. Both sender-initiated
policies and receiver-initiated policies have been
considered. However, there has not previously been a
rigorous comparison of these two strategies. Such a
comparison is made difficult by the problem of choosing
appropriate representative policies of each type, and
by the potentially quite different costs incurred in
effecting transfers. (Receiver-initiated policies
typically will require the transfer of executing tasks,
which incurs substantial costs in most systems [Powell
\& Miller 1983]. Sender-initiated policies naturally
avoid such costly transfers, since tasks can be
transferred upon arrival, prior to beginning
execution.) Our present paper is similar to our
previous work in that our purpose, rather than to
advocate specific policies, is to address a fundamental
question concerning policies in general: How should
system state information be collected and load sharing
actions initiated --- by potential receivers of work,
or by potential senders of work? In studying this
question we consider a set of abstract policies that
represent only the essential aspects of
receiver-initiated and sender-initiated load sharing
strategies. These policies are investigated using
simple analytic models. Our objective is not to
determine the absolute performance of particular load
sharing policies, but rather to gain intuition
regarding the relative merits of the different
approaches under consideration. We represent locally
distributed systems as collections of identical nodes,
each consisting of a single processor. The nodes are
connected by a local area network (e.g., an Ethernet).
All nodes are subjected to the same average arrival
rate of tasks, which are of a single type. In contrast
to most previous papers on load sharing, we represent
the cost of task transfer as a processor cost rather
than as a communication network cost. It is clear from
measurement and analysis [Lazowska et al. 1984] that
the processor costs of packaging data for transmission
and unpackaging it upon reception far outweigh the
communication network costs of transmitting the data.
We study three abstract load sharing policies,
comparing their performance to each other and to that
of a system in which there is no load sharing. The
Sender policy is used as a representative of
sender-initiated load sharing strategies. The Receiver
and Reservation policies are used as representatives of
receiver-initiated load sharing strategies; unlike the
Receiver policy, the Reservation policy will transfer
only newly arriving tasks. In a bit more detail. Sender:
In our earlier work concerning the appropriate level of
complexity for adaptive load sharing schemes, we
identified two sub-policies of sender-initiated
strategies. The transfer policy determines whether a
task should be processed locally or remotely. The
location policy determines to which node a task
selected for transfer should be sent. In that previous
study, we considered threshold transfer policies, in
which each node uses only local state information. An
attempt is made to transfer a task originating at a
node if and only if the number of tasks already in
service or waiting for service (the node queue length)
is greater than or equal to some threshold T. We
considered various location policies spanning a range
of complexity. We found that the use of a complex
location policy yields only slight improvement over the
use of a simple location policy that, like the transfer
policy, uses threshold information. In this threshold
location policy, a node is selected at random and
probed to determine whether the transfer of a task to
that node would place the node above the threshold T.
If not, then the task is transferred. If so, then
another node is selected at random and probed in the
same manner. This continues until either a suitable
destination node is found, or the number of probes
reaches a static probe limit, Lp. In the latter case,
the originating node must process the task. (The use of
probing with a fixed limit, rather than broadcast,
ensures that the cost of executing the load sharing
policy will not be prohibitive even in large networks.
The performance of this policy was found to be
surprisingly insensitive to the choice of probe limit:
the performance with a small probe limit, e.g., 3 or 5,
is nearly as good as the performance with a large probe
limit, e.g., 20.) The sender-initiated policy with a
threshold transfer policy and a threshold location
policy was found to yield performance not far from
optimal, particularly at light to moderate system
loads. For this reason, and because of its simplicity,
we choose this policy to serve as the representative of
sender-initiated strategies for the comparison that is
the subject of the present paper, and term it here the
Sender policy. Receiver: To facilitate comparison
between sender-initiated strategies and
receiver-initiated strategies, a representative policy
of the latter class should be as similar as possible to
the Sender policy. In particular, it should utilize
threshold-type state information, and have a bound Lp
on the number of remote nodes whose state can be
examined when making a task transfer decision. In the
Receiver policy, a node attempts to replace a task that
has completed processing if there are less than $T$
tasks remaining at the node. A remote node is selected
at random and probed to determine whether the transfer
of a task from that node would place its queue length
below the threshold value T. If not, and if the node is
not already in the process of transferring a task, a
task is transferred to the node initiating the probe.
Otherwise, another node is selected at random and
probed in the same manner. This continues until either
a node is found from which a task can be obtained, or
the number of probes reaches a static probe limit, Lp.
In the latter case, the node must wait until another
task departs before possibly attempting again to
initiate a transfer. (This is completely analogous to
the operation of the Sender policy, in which a node
that fails to find a suitable destination to which to
transfer a task must wait until another task arrives
before attempting again to initiate a transfer.) The
Receiver policy with T=1 has been studied using a
simulation model by Livny and Melman [1982], who term
it the ``poll when idle algorithm''. Reservation: The
Reservation policy, like the Sender policy but in
contrast to the Receiver policy, will only transfer
newly arriving tasks. This may be advantageous in
multiprogramming systems in which nodes attempt to give
each of the tasks present some share of the total
available processing power. If the Receiver policy is
used in such a system, almost all task transfers will
involve executing tasks, and may be substantially more
costly than transfers of non-executing tasks. In the
Reservation policy, as in the Receiver policy, a node
attempts to replace a task that has completed
processing if there are less than $T$ tasks remaining
at the node. A remote node is selected at random and
probed to determine whether the transfer of the next
task to originate at that node would place its queue
length below the threshold value T. If not, and if no
other ``reservation'' is pending for this node, then
this next arrival is ``reserved'' by the probing node;
it is transferred upon arrival if no other tasks have
arrived at the probing node by that time. If the
reservation attempt is not successful, another node is
selected at random and probed in the same manner. This
continues until either a node is found at which the
next arrival can be reserved, or the number of probes
reaches a static probe limit, Lp. In the latter case,
the node must wait until another task departs before
possibly attempting again to reserve a task. Our
evaluation of this policy is optimistic. (Even this
optimistic evaluation predicts unsatisfactory
performance.) At the time a reservation is attempted,
we assume that the probed node can ``see into the
future'' to the arrival time of the (potentially)
reserved task. The reservation is made only if the
probed node will be above threshold at that time. Also,
even when a reservation request is successful, the
probed node considers this next arrival as ineligible
for other reservation requests only if it will actually
be transferred to the node holding the reservation.
Finally, we assume that the probability that a task
will be processed locally rather than transferred,
given that it arrives when the node queue length is at
or over threshold, is independent of the prior history
of the task arrivals and departures. In fact, this
probability is higher for tasks with shorter
interarrival times. Many of the results of our study
are illustrated in the accompanying figure. While the
figure illustrates specific choices of parameter
values, the results are quite robust with respect to
these choices; a substantial part of the full paper is
devoted to demonstrating this robustness. The results
include: Both receiver-initiated and sender-initiated
policies offer substantial performance advantages over
the situation in which no load sharing is attempted
(shown as M/M/1 in the figure). Sender-initiated
policies are preferable to receiver-initiated policies
at light to moderate system loads. Receiver-initiated
policies are preferable at high system loads, but only
if the costs of task transfer under the two strategies
are comparable. If the cost of task transfers under
receiver-initiated policies is significantly greater
than under sender-initiated policies (for example,
because executing tasks must be transferred), then
sender-initiated policies provide uniformly better
performance. Modifying receiver-initiated policies to
transfer only newly-arrived tasks (so as to avoid the
cost of transferring executing tasks) yields
unsatisfactory performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
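
The abstract above describes the Sender policy precisely enough to sketch its
decision logic: a threshold transfer policy plus a threshold location policy
with random probing and a static probe limit Lp. The sketch below renders only
that logic; `probe` is a hypothetical stand-in for querying a remote node's
queue length, the default values of T and Lp are illustrative, and nothing of
the paper's analytic models is reproduced.

import random

def place_arriving_task(local_queue_len, remote_nodes, probe, T=2, Lp=3):
    """Decide where a newly arriving task should run under the Sender policy."""
    if local_queue_len < T:          # transfer policy: below threshold,
        return "local"               # process the task locally
    for node in random.sample(remote_nodes, min(Lp, len(remote_nodes))):
        if probe(node) < T:          # location policy: probe up to Lp random
            return node              # nodes for one that stays within T
    return "local"                   # all probes failed: keep the task

# Hypothetical usage with made-up remote queue lengths:
queue_len = {"B": 3, "C": 1, "D": 2}
print(place_arriving_task(4, list(queue_len), probe=queue_len.get))
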
@Article{Gelernter:1985:ACP,
author = "David Gelernter and Sunil Podar and Hussein G. Badr",
title = "An adaptive communications protocol for network
computers (extended abstract)",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "4--5",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317803",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A network computer is a collection of computers
designed to function as one machine. On a network
computer, as opposed to a multiprocessor, constituent
subcomputers are memory-disjoint and communicate only
by some form of message exchange. Ensemble
architectures like multiprocessors and network
computers are of growing interest because of their
capacity to support parallel programs, where a parallel
program is one that is made up of many
simultaneously-active, communicating processes.
Parallel programs should, on an appropriate
architecture, run faster than sequential programs, and,
indeed, good speed-ups have been reported in parallel
programming experiments in several domains, amongst
which are AI, numerical problems, and system
simulation. Our interest lies in network computers,
particularly ones that range in size from several
hundred nodes to several thousand. Network computers
may be organized in either of two basic ways: their
nodes may communicate over a shared bus (or series of
buses), as in S/Net; or over point-to-point links, as
in Cosmic Cube and the Transputer Network. The work to
be presented deals with the point-to-point class, the
elements of which we shall refer to as ``linked
networks''. Linked networks face a fundamental
communication problem. Unless they are completely
connected (which is rarely possible), two communicating
nodes will not necessarily be connected by a single
link. Messages between nodes must therefore, in
general, travel over several links and be processed by
several intermediate nodes. Communication delays
increase with the length of the traveled path. Network
computer designers therefore provide networks the
diameters of which are small relative to their size,
and network operating systems will attempt to place
communicating processes as close to each other as
possible. We present a communication protocol for
linked networks that was designed specifically for
network computers. Staged Circuit Switching is a
communication protocol that combines aspects of
store-and-forwarding with aspects of circuit switching,
where circuit switching refers to the class of
protocols in which a communicating source and
destination first construct a dedicated path or circuit
between them, then communicate directly over this path.
The path may be a physical connection, as in
space-switched circuit-switching, or a series of
dedicated slots in time-division multiplexing switches,
as in time-switching protocols. The
staged-circuit-switching design is strongly related to
space-switched circuit-switching and encompasses both
the protocol itself and a communication architecture to
support it. In staged circuit switching, each message
constructs for itself the longest physical circuit that
it can without waiting for links. When a message is to
be sent, a header that records the message's source and
destination is sent propagating through the network
towards the destination node; the header seizes each
free link along its path and incorporates it into a
growing circuit. When it meets a busy link, or arrives
at its destination, circuit building stops, the
message's data portion is transmitted and acknowledged
over the existing circuit, and the circuit is released.
A message that has not arrived at its destination then
gathers itself together and plunges onward in the same
fashion. In an empty network then, staged circuit
switching is the same as circuit switching: each
message is transmitted over a direct circuit from
source to destination. In a heavily loaded network, it
is the same as store-and-forwarding: each next-link is
busy, each circuit is therefore only one link long, and
the message proceeds hop by hop. The protocol combines
the speed benefits of circuit switching at light
traffic loads, with the high bandwidth advantages of
store-and-forwarding at heavy loads. We have carried
out extensive simulation studies to evaluate the
dynamics of staged circuit switching from the point of
view of message delays, throughput, circuit lengths,
efficiency, implementation, and so on. The studies were
implemented in the context of a toroidal topology of
diameter 32, yielding a 1024-node network. Uniform
source-to-destination distributions were used. Both the
topology and the source-to-destination distributions
are analyzed. An analysis of network saturation based
on mean values is also given. Staged circuit switching
unambiguously emerges as a strong protocol with
performance characteristics superior to those of either
classical store-and-forwarding or circuit switching,
particularly with regard to adaptability to varying
network loads and in providing a consistently high
effective network bandwidth. On the basis of our
results the protocol is proposed as a suitable
candidate for linked networks. Its attractiveness is
further enhanced by its potential ability to
continually reconfigure the network dynamically at
runtime to optimize for observed traffic patterns.
Heavily-used circuits may be left in place over longer
periods than a single message transmission. In this
way, the system constantly rearranges the network
topology in order to bring heavily-communicating
distant nodes closer together, thereby acting as a
``communication cache''. A ``cache hit'' would
correspond to finding the desired destination node one
hop away from a given source. Effective exploitation of
this capability is the subject of ongoing research.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
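
The abstract above states the staged-circuit-switching rule: grow a circuit
over free links toward the destination, stop at a busy link or at the
destination, transmit over the partial circuit, release it, and continue. The
sketch below renders just that rule; routing (`next_hop`) and link state
(`link_free`) are hypothetical helpers, and the toy network is made up.

def deliver(src, dst, next_hop, link_free, send_over):
    """Move one message from src to dst in stages; return the final node."""
    at = src
    while at != dst:
        circuit = [at]
        while at != dst:                  # grow the circuit greedily
            hop = next_hop(at, dst)
            if not link_free(at, hop):    # busy link: stop growing here
                break
            circuit.append(hop)           # seize the free link
            at = hop
        send_over(circuit)                # transmit data, then release circuit
    return at

# Toy line network 0-1-2-3 in which link (1, 2) is busy for one stage:
busy_once = {(1, 2)}
def link_free(a, b):
    if (a, b) in busy_once:
        busy_once.discard((a, b))         # the link frees up afterwards
        return False
    return True

deliver(0, 3, next_hop=lambda a, b: a + 1, link_free=link_free, send_over=print)
# prints [0, 1] (circuit stopped at the busy link), then [1, 2, 3]
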
@Article{Gelenbe:1985:ADC,
author = "Erol Gelenbe and David Finkel and Satish K. Tripathi",
title = "On the availability of a distributed computer system
with failing components",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "6--13",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317804",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a model for distributed systems with
failing components. Each node may fail and during its
recovery the load is distributed to other nodes that
are operational. The model assumes periodic
checkpointing for error recovery and testing of the
status of other nodes for the distribution of load. We
consider the availability of a node, which is the
proportion of time a node is available for processing,
as the performance measure. A methodology for
optimizing the availability of a node with respect to
the checkpointing and testing intervals is given. A
decomposition approach that uses the steady-state flow
balance condition to estimate the load at a node is
proposed. Numerical examples are presented to
demonstrate the usefulness of the technique. For the
case in which all nodes are identical, closed form
solutions are obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Conway:1985:RNE,
author = "A. E. Conway and N. D. Georganas",
title = "{RECAL} --- a new efficient algorithm for the exact
analysis of multiple-chain closed queueing networks
(abstract)",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "14--14",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "RECAL, a Recursion by Chain Algorithm for computing
the mean performance measures of product-form
multiple-chain closed queueing networks, is presented.
It is based on a new recursive expression which relates
the normalization constant of a network with $r$ closed
routing chains to those of a set of networks having $
(r - 1)$ chains. It relies on the artifice of breaking
down each chain into constituent sub-chains that each
have a population of one. The time and space
requirements of the algorithm are shown to be
polynomial in the number of chains. When the network
contains many routing chains the proposed algorithm is
substantially more efficient than the convolution or
mean value analysis algorithms. The algorithm therefore
extends the range of queueing networks which can be
analyzed efficiently by exact means. A numerical
example is given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balbo:1985:MPS,
author = "G. Balbo and S. C. Bruell and S. Ghanta",
title = "Modeling priority schemes",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "15--26",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317806",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop Generalized Stochastic Petri Net models for
several priority queueing disciplines. The building
blocks of these models are explained and many variants
are easily derivable from them. We then combine these
building blocks with product-form queueing network
models. Numerical results are provided that illustrate
the effectiveness of the method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "generalized stochastic Petri nets; head-of-the-line;
preemptive resume; priorities; product-form queueing
networks; reorientation; time-out",
}
@Article{Walstra:1985:NNQ,
author = "Robbe J. Walstra",
title = "Nonexponential networks of queues: a maximum entropy
analysis",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "27--37",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317807",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We will propose a new, iterative method for
approximately analyzing closed networks of queues with
nonexponential service time distributions and FCFS
scheduling. Our method is based on the Principle of
Maximum Entropy and produces results which, first, are
consistent with the fundamental Work Rate Theorem and,
second, are exact for separable networks of queues.
Considering accuracy and execution time
characteristics, our method offers a viable alternative
to Marie's homogeneous approximation method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
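
The abstract above does not spell out the maximum-entropy machinery it builds
on. For orientation only, the derivation below shows the standard single-queue
case in which just the mean queue length is constrained, which yields a
geometric queue-length distribution; the paper's network-level method
(consistent with the Work Rate Theorem and reported exact for separable
networks) is not reproduced.

% Maximum entropy over queue-length distributions with a mean constraint:
\begin{align*}
  \max_{p}\; -\sum_{n\ge 0} p(n)\ln p(n)
  \quad\text{s.t.}\quad \sum_{n\ge 0} p(n)=1,\qquad \sum_{n\ge 0} n\,p(n)=\bar{n},
\end{align*}
whose solution is the geometric distribution
\begin{align*}
  p(n) = (1-x)\,x^{n}, \qquad x=\frac{\bar{n}}{1+\bar{n}} .
\end{align*}
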
@Article{Calzarossa:1985:SSC,
author = "Maria Calzarossa and Domenico Ferrari",
title = "A sensitivity study of the clustering approach to
workload modeling (extended abstract)",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "38--39",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317808",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a paper published in 1984 [Ferr84], the validity of
applying clustering techniques to the design of an
executable model for an interactive workload was
discussed. The following assumptions, intended not to
be necessarily realistic but to provide sufficient
conditions for the applicability of clustering
techniques, were made: The system whose workload is to
be modeled is an interactive system, and its
performance can be accurately evaluated by solving a
product-form closed queueing network model. The
behavior of each interactive user can be adequately
modeled by a probabilistic graph (called a user
behavior graph); in such a graph, each node represents
an interactive command type, and the duration of a
user's stay in the node probabilistically equals the
time the user spends typing in a command of that type,
waiting for the system's response, and thinking about
what command should be input next. The interactive
workload to be modeled is stationary, and the workload
model to be constructed is intended to reproduce its
global characteristics (not those of some brief excerpt
from it exhibiting peculiar dynamics), hence to be
stationary as well. It was shown in [Ferr84] that,
under these assumptions, clustering command types
having the same probabilistic resource demands does not
affect the values of the performance indices the
evaluators are usually interested in, provided the
visit ratio to each node in the reduced (i.e.,
post-clustering) user behavior graph is equal to the
sum of the visit ratios the cluster's components had in
the original graph. Since the reduction we have just
described is equivalent to replacing each cluster with
one or more representatives of its components, and
since this is also the goal of applying clustering
techniques to the construction of executable workload
models substantially more compact than the original
workload to be modeled, this result shows that such
techniques are valid (i.e., produce accurate models)
when the assumptions and the conditions mentioned above
are satisfied. One condition which in practice is never
satisfied, however, is that the clustered commands are
characterized by exactly the same resource demands. In
fact, clustering algorithms are non-trivial just
because they have to recognize ``nearness'' among
commands with different characteristics, and group
those and only those commands whose resource demands
are sufficiently similar (where the notion of
similarity is to be defined by introducing that of
distance between two commands). Thus, the question of
the sensitivity of a workload model's accuracy to the
inevitable dispersion of the characteristics of a
cluster's components immediately arises. We know that,
if an adequate product-form model of an interactive
system can be built, if the users' behaviors can be
accurately modeled by probabilistic graphs, and if the
workload and the model of it to be constructed are
stationary, then a workload model in which all commands
with identical characteristics are grouped together and
modeled by a single representative is an accurate model
of the given workload (i.e., the model produces the
same values of the performance indices of interest as
the modeled workload when it is processed by a given
system). This is true, of course, provided the visit
ratios of the workload model's components equal the
sums of those of the corresponding workload components.
If we now apply a clustering algorithm to the given
workload, thereby obtaining clusters of similar, but
not identical, commands, and we build a workload model
by assembling cluster representatives (usually one per
cluster, for instance with demands corresponding to
those of the cluster's center of mass), by how much
will the values of the performance indices produced by
the workload model running on the given system differ
from those produced by the workload to be modeled? As
with several other problems, this could be attacked by
a mathematical approach or by an experimental one.
While a successful mathematical analysis of the
sensitivity of the major indices to the dispersion in
the resource demands of the commands being clustered
together would provide more general results, it would
also be likely to require the introduction of
simplifying assumptions (for example, having to do with
the distributions of the resource demands in a cluster
around its center of mass) whose validity would be
neither self-evident nor easy to verify experimentally.
On the other hand, an experimental approach achieves
results which, strictly speaking, are only applicable
to the cases considered in the experiments.
Extrapolations to other systems, other workloads, other
environments usually require faith, along with
experience, common sense, and familiarity with real
systems and workloads. This inherent lack of generality
is somehow counterbalanced, however, by the higher
degree of realism that is achievable with an
experimental investigation. In particular, when in a
study the properties of workloads are to play a crucial
role (there are very few studies indeed in which this
is not the case!), using a mathematical approach is
bound to raise about such properties questions that are
either very difficult or impossible to answer.
Primarily for this reason, and knowing very well the
limitations in the applicability of the results we
would obtain, we decided to adopt an experimental
approach. Since the question we were confronted with
had never been answered before (nor, to our knowledge,
had it been asked), we felt that our choice was
justified by the exploratory nature of the study. If
the resulting sensitivity were to turn out to be high,
we could conclude that not even under the above
assumptions can clustering techniques be trusted to
provide reasonable accuracy in all cases and hence
should not be used, or used with caution in those cases
(if they exist) in which their accuracy might be
acceptable. If, on the other hand, the sensitivity were low,
then we could say that, in at least one practical case,
clustering techniques would have been shown to work
adequately (of course, under all the other assumptions
listed above). The rationale of this investigation
might be questioned by asking why it would not be more
convenient to test the validity of clustering
techniques directly, that is, by comparing the
performance indices produced by a real workload to
those produced by an executable model (artificial
workload) built according to a clustering technique.
Our answer is that, in this study as well as in
[Ferr84], we are more interested in understanding the
limitations and the implications of clustering and
other workload model design methods than in evaluating
the accuracy of clustering in a particular case. In
other words, we are not so much keen on finding out
whether the errors due to clustering are of the order
of 10\% or of 80\%, but we want to be able to
understand why they are only 10\% or as large as 80\%,
respectively. Thus, we need to decompose the total
error into the contributions to it of the various
discrepancies that any real situation exhibits with
respect to the ideal one. This paper describes a study
primarily performed to assess the magnitude of one such
contribution, that of the dispersion of the resource
demands of clustered commands. An experimental
approach, in the case being considered here, requires
first of all that a workload for the experiment be
selected. Then, that workload is to be measured, in
order to obtain the values of the parameters defined by
the desired characterization. Next, an executable
workload model is to be built by applying a clustering
technique to the real workload selected. Then, the
workload and its model are to be run on the same
system, so that the model's accuracy can be evaluated
by comparing the performance indices produced by them.
As our study is to try to isolate the sensitivity of
that accuracy to the differences in demands among the
commands that have been grouped into the same cluster,
these differences must be made the only source of
inaccuracies in the performance produced by the model.
To isolate this contribution to the error from all of
the others, the latter sources should be eliminated.
Finally, the experiment is to be carried out, and its
results interpreted. The results show that, on the
whole, the clustering method for workload model design
is reasonably accurate in the context of the case
examined in our study. The sensitivities we found were
reasonably low. Thus, we can state that, in at least
one practical case and under the assumptions discussed
in this paper, clustering techniques for executable
workload model design have been shown to work well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
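
The abstract above describes the reduction step that motivates the study: each
cluster of commands is replaced by a representative (for instance the cluster's
centre of mass) whose visit ratio is the sum of the members' visit ratios. The
sketch below renders only that step, with cluster membership assumed already
given and with hypothetical command data; the clustering algorithm itself and
the sensitivity experiments are not reproduced.

def cluster_representative(commands):
    """commands: list of (visit_ratio, {resource: demand}) for one cluster."""
    total_visits = sum(v for v, _ in commands)
    resources = commands[0][1].keys()
    # Centre of mass weighted by visit ratio (an unweighted mean is another choice).
    centroid = {r: sum(v * d[r] for v, d in commands) / total_visits
                for r in resources}
    return total_visits, centroid

editor_like_cluster = [(0.20, {"cpu": 0.8, "disk": 2.0}),    # hypothetical
                       (0.15, {"cpu": 1.0, "disk": 2.4})]    # commands
print(cluster_representative(editor_like_cluster))
# (0.35, {'cpu': 0.885..., 'disk': 2.171...})
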
@Article{Raghavan:1985:CIU,
author = "S. V. Raghavan and R. Kalyanakrishnan",
title = "On the classification of interactive user behaviour
indices",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "40--48",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317809",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concepts of user behaviour entropy and user
behaviour mobility are proposed as indices for the
description of user behaviour. The user behaviour
indices are derivable from the mode probability vector
and the mode transition matrix which adequately
describe the behaviour dynamics of an interactive user.
The user behaviour indices reduce the $(n^2 + n)$-dimensional
parameter space to two dimensions only for
classification, without loss of information related to
the user behaviour dynamics. The classification of the
users in an interactive educational environment using
the user behaviour indices is presented as a case
study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
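
The abstract above derives its indices from the mode probability vector and
mode transition matrix but does not give formulas. The sketch below computes a
plain Shannon entropy from a mode probability vector, which is the natural
reading of a ``user behaviour entropy''; the companion mobility index is not
shown because its definition is not stated in the abstract, and the
probabilities are hypothetical.

import math

def behaviour_entropy(mode_probabilities):
    """Shannon entropy H = -sum p*log2(p) over a user's mode probability vector."""
    return -sum(p * math.log2(p) for p in mode_probabilities if p > 0)

# Hypothetical user: 50% editing, 30% compiling, 20% running programs.
print(behaviour_entropy([0.5, 0.3, 0.2]))    # about 1.485 bits
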
@Article{Verkamo:1985:ERL,
author = "A. Inkeri Verkamo",
title = "Empirical results on locality in database
referencing",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "49--58",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317810",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Database referencing behaviour is analyzed with
respect to locality features. The analysis is based on
database reference strings collected from several runs
of typical batch programs accessing a real database.
Locality of reference is measured by the stack distance
probability distribution, the number of block faults,
and a locality measure based on the memory reservation
size. In all the experiments, locality of reference is
observed, but it is found to be weaker than in code
referencing or even in some previous studies on
database referencing. The phase/transition concept used
in virtual memory systems is not well applicable to
database referencing, since a large part of the
locality set is constantly changing. The disruption of
the phases is predominantly due to random referencing
of data blocks. The references to index blocks show
stronger locality. In some special cases, sequentiality
is observed in the use of the data blocks. In general,
neither replacement strategies developed for virtual
memory systems nor prefetching techniques seem adequate
for performance improvement of database referencing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
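
The abstract above measures locality with the stack distance probability
distribution. The sketch below computes LRU stack distances from a
block-reference string in the textbook way (distance = 1-based position of the
block in an LRU stack, None on a first reference); the study's actual
instrumentation and database traces are not reproduced, and the reference
string is made up.

def stack_distances(reference_string):
    stack, distances = [], []
    for block in reference_string:
        if block in stack:
            d = stack.index(block) + 1    # 1 = re-reference of the MRU block
            stack.remove(block)
        else:
            d = None                      # cold (first) reference
        stack.insert(0, block)            # block becomes most recently used
        distances.append(d)
    return distances

refs = ["a", "b", "a", "c", "b", "b", "a"]    # hypothetical block references
print(stack_distances(refs))                  # [None, None, 2, None, 3, 1, 3]
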
@Article{Khelalfa:1985:DCS,
author = "Halin M. Khelalfa and Anneliese K. von Mayrhauser",
title = "Degradable computer systems with dependent
subsystems",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "59--68",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317811",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When building a model for degradable computer systems,
it is not sufficient to merely quantify reliability and
performance measures. These indices must be
mathematically sound if they are to be used to design
such systems in an optimal way. The paper presents an
analysis of design optimisation for degradable computer
systems and shows how this particular application leads
to a system model with interdependent subsystems. A
procedure for solving the resulting Markov model is
presented. Its computational complexity is compared to
that of another solution method and shown to be considerably
more efficient.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chillarege:1985:ESW,
author = "Ram Chillarege and Ravishankar K. Iyer",
title = "The effect of system workload on error latency: an
experimental study",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "69--77",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317812",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, a methodology for determining and
characterizing error latency is developed. The method
is based on real workload data, gathered by an
experiment instrumented on a VAX 11/780 during the
normal workload cycle of the installation. This is the
first attempt at jointly studying error latency and
workload variations in a full production system.
Distributions of error latency were generated by
simulating the occurrence of faults under varying
workload conditions. A family of error latency
distributions so generated illustrates that error
latency is not so much a function of when in time a
fault occurred but rather a function of the workload
that followed the failure. The study finds that the
mean error latency varies by a 1 to 8 (hours) ratio
between high and low workloads. The method is general
and can be applied to any system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gonsalves:1985:PCT,
author = "Timothy A. Gonsalves",
title = "Performance characteristics of two {Ethernets}: an
experimental study",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "78--86",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317813",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Local computer networks are increasing in popularity
for the interconnection of computers for a variety of
applications. One such network that has been
implemented on a large scale is the Ethernet. This
paper describes an experimental performance evaluation
of a 3 and a 10 Mb/s Ethernet. The effects of varying
packet length and transmission speed on throughput,
mean delay and delay distribution are quantified. The
protocols are seen to be fair and stable. These
measurements span the range from the region of high
performance of the CSMA/CD protocol to the upper limits
of its utility where performance is degraded. The
measurements are compared to the predictions of
existing analytical models. The correlation is found to
range from good to poor, with more sophisticated models
yielding better results than a simple one.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chlamtac:1985:PIS,
author = "I. Chlamtac and M. Eisinger",
title = "Performance of integrated services (voice\slash data)
{CSMA\slash CD} networks",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "87--93",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317814",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a voice/data integrated local area
communication system. Due to the high suitability of
CSMA/CD protocols for data communication and the
existence of real time voice delay constraints we
consider a hybrid TDM/CSMA/CD protocol. This model
fundamentally differs from the very well documented
voice/data integrated systems in point to point
networks in which both voice and data users are
assigned fixed duration time slots for transmission.
The TDM/CSMA/CD integrated system performance is
analysed and basic performance tradeoffs in the system
design are manifested.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chlamtac:1985:AMH,
author = "I. Chlamtac and M. Eisinger",
title = "An analytic model of the hyperchannel network using
multiple channel architecture",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "94--104",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317815",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The HYPERchannel communication network configured
around one to four channels is considered. We develop a
queueing model which characterizes the network
performance as a function of the number of channels,
the channel load and the number of stations in the
network. The model is used to analyze the multichannel
system performance and to evaluate the effect of the
channel selection mechanism, as implemented by the
HYPERchannel station interface units, on the
performance. It is shown that the network bandwidth
utilization is directly related to the channel
selection process and that it varies with network
configuration and load. These observed relations are
especially significant since they are most pronounced
in networks with a small number of stations, the typical
configuration in the majority of operational
HYPERchannel networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bleistein:1985:APM,
author = "Sandra Bleistein and Shin-Sun Cho and Robert T.
Goettge",
title = "Analytic performance model of the {U.S.} en route air
traffic control computer systems",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "105--115",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317816",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An analytic performance modeling case study of a
complex command and control computer system is
presented. A queueing network model of the system was
developed and validated. Features of the model found to
be critical to its accuracy were detailed software
models, general service time distributions, and models
of transient response time behavior. Response time
prediction accuracy of the model was validated to 20
percent for moderate device utilizations. The study
shows that analytic techniques can be successfully
applied to performance modeling of complex systems.
Prediction of response time percentile values and
modeling of transient effects are identified as two
areas where improved analytic techniques would enhance
performance engineering of such systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dowdy:1985:AUM,
author = "Lawrence W. Dowdy and Manvinder S. Chopra",
title = "On the applicability of using multiprogramming level
distributions",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "116--127",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317817",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A computer system's workload is represented by its
multiprogramming level, which is defined as the number
of tasks (jobs, customers) which actively compete for
resources within the system. In a product-form queuing
network model of the system, the workload is modeled by
assuming that the multiprogramming level is either
fixed (i.e., closed model) or that the multiprogramming
level depends upon an outside arrival process (i.e.,
open model). However, in many actual systems, closed
and open models are both inappropriate since the
multiprogramming level is neither fixed nor governed by
an outside arrival process. In an actual system, the
multiprogramming level varies due to features such as
task spawning, killing, blocking, parallel processing,
and/or simultaneous resource possession. The
multiprogramming level is a random variable with an
associated distribution. This paper demonstrates that
improved models can result from using this
multiprogramming level distribution information.
Several examples relative to open versus closed models,
subsystem models, actual system models, and blocking
models are given which demonstrate the applicability of
using multiprogramming level distributions. This
applicability, shown via the examples, is the main
contribution of the paper. The examples also motivate
interesting theoretical results relating to open
models, closed models, and subsystem models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "blocking; multiprogramming level distributions; open
and closed queuing networks; subsystem modeling",
}
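Annotator's note: a minimal illustration of one way multiprogramming-level (MPL) distribution information can be used, offered only as a sketch and not as the paper's construction: weight fixed-MPL (closed-model) throughputs X(n) by the observed MPL distribution p(n). The throughput and probability values below are made up.

# One simple way to use an MPL distribution (a sketch, not necessarily the
# paper's exact construction): weight closed-model throughputs by the
# fraction of time each multiprogramming level is observed.

def mpl_weighted_throughput(throughput_by_mpl, mpl_distribution):
    # throughput_by_mpl[n] = closed-model throughput with n tasks active;
    # mpl_distribution[n] = fraction of time the system runs n tasks.
    return sum(p * x for p, x in zip(mpl_distribution, throughput_by_mpl))

if __name__ == "__main__":
    x = [0.0, 9.1, 14.3, 16.7]        # illustrative X(0..3)
    p = [0.1, 0.3, 0.4, 0.2]          # illustrative MPL distribution
    print(mpl_weighted_throughput(x, p))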
@Article{Krzesinski:1985:MQN,
author = "A. E. Krzesinski and P. Teunissen",
title = "Multiclass queueing networks with population
constrained subnetworks",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "128--139",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317818",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A Multiclass Queueing Network model (MQN) is
partitioned into a set of disjoint subnetworks.
Population constraints are applied to each subnetwork
such that within each subnetwork each population chain
is either subject to an individual population
constraint, or a group of chains may be subject to a
common (shared) population constraint. Such population
constraints are necessary in order to model
multiprogramming level constraints in mainframe
computer systems and window flow control mechanisms in
computer communication networks. A computationally
efficient approximate solution method is developed for
solving MQN's with population constraints. Each
subnetwork is reduced to a single approximately flow
equivalent composite centre by assuming that the effect
of other chains on a given chain can be adequately
represented by their average customer populations. The
accuracy of the population constraint approximation is
compared against previous techniques by applying it to
a set of test cases for which simulation solutions have
previously been reported. The accuracy of the
approximation technique is found to be good and in
general is an improvement over previously published
concurrency constraint approximations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate solution; mean value analysis; multiclass
queueing networks; product form solutions",
}
@Article{Branwajn:1985:NSI,
author = "Alexandre Branwajn and Yung-Li Lily Jow",
title = "A note on service interruptions",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "140--148",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317986",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This note is devoted to a few remarks on the
performance evaluation of systems with service
interruptions such as priority queues for lower
priority customers, systems subject to breakdowns, etc.
Recent work on priority queues has shown that a popular
approximation method, the ``reduced occupancy
approximation'', can be exceedingly inaccurate for a
range of parameter values. We identify a cause of
inaccuracy and, hence, propose a simple correction that
provides a substantial improvement in the results.
Using the example of a simple model with service
interruptions, we show also that conditional
probabilities can be of value in deriving recurrent
solutions to some problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
xxnote = "Check: author may be Brandwajn??",
}
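Annotator's note: for reference, a minimal sketch of the ``reduced occupancy approximation'' (ROA) the abstract criticizes, compared against the exact M/M/1 preemptive-resume priority formula, assuming both classes share one exponential service rate mu; the correction proposed in the paper is not reproduced here.

# Sketch (not the paper's correction): ROA versus the exact M/M/1
# preemptive-resume priority result for the low-priority class.

def roa_low_priority_response(lam_hi, lam_lo, mu):
    # ROA: the low-priority class sees a server slowed by the high-priority load.
    rho_hi = lam_hi / mu
    return 1.0 / (mu * (1.0 - rho_hi) - lam_lo)

def exact_low_priority_response(lam_hi, lam_lo, mu):
    # Standard two-class M/M/1 preemptive-resume priority formula.
    rho_hi, rho_lo = lam_hi / mu, lam_lo / mu
    service = (1.0 / mu) / (1.0 - rho_hi)
    waiting = ((lam_hi + lam_lo) / mu**2) / ((1.0 - rho_hi) * (1.0 - rho_hi - rho_lo))
    return service + waiting

if __name__ == "__main__":
    for lam_hi in (0.1, 0.4, 0.7):    # illustrative high-priority loads
        print(lam_hi,
              round(roa_low_priority_response(lam_hi, 0.2, 1.0), 3),
              round(exact_low_priority_response(lam_hi, 0.2, 1.0), 3))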
@Article{Plateau:1985:SSP,
author = "Brigitte Plateau",
title = "On the stochastic structure of parallelism and
synchronization models for distributed algorithms",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "147--154",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317819",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper a new technique to handle complex Markov
models is presented. This method is based on a
description using stochastic automata and is dedicated
to the modelling of distributed algorithms. One example of a
mutual exclusion algorithm in a distributed environment
is extensively analysed. The mathematical analysis is
based on tensor algebra for matrices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Snyder:1985:ANS,
author = "Patricia M. Snyder and William J. Stewart",
title = "An approximate numerical solution for multiclass
preemptive priority queues with general service time
distributions",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "155--165",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317820",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper an approximate numerical solution for a
multiclass preemptive priority single server queue is
developed. The arrival process of each class follows a
Poisson distribution. The service time distribution
must have a rational Laplace transform, but is
otherwise arbitrary and may be different for different
classes. The work reported here was motivated by a
desire to compute the equilibrium probability
distribution of networks containing preemptive priority
servers. Such networks are frequently encountered when
modeling computer systems, medical care delivery
systems and communication networks. We wish to use an
iterative technique which constructs a series of two
station networks consisting of one station from the
original network and one ``complementary'' station
whose behavior with respect to the original station
mimics that of the rest of the network. At each
iteration, it is necessary to compute the equilibrium
probability distribution of one or more preemptive
priority queues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hevner:1985:EOD,
author = "Alan R. Hevner",
title = "Evaluation of optical disk systems for very large
database applications",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "166--172",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317821",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Optical Disk Systems have significant advantages over
conventional magnetic mass storage media for very large
database applications. Among other features, optical
disk systems offer large capacity and high transfer
rate. A critical problem is how to integrate the
optical disk system into a total application system
environment while maintaining the high performance
capabilities of the optical disk. In this paper the
performance of optical disk system configurations under
realistic application environments is analyzed via
queueing models. The results provide several important
guidelines for the use of optical disk systems on large
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Houtekamer:1985:LDC,
author = "Gilbert E. Houtekamer",
title = "The local disk controller",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "173--182",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317822",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of the I/O subsystem in the 370-XA
architecture has been improved considerably with the
introduction of the new channel subsystem, as compared
to the System/370 architecture. The emphasis in the
370-XA architecture is on reducing the CPU load
associated with I/O, and on reducing the congestion in
multi-CPU, shared systems, by redesigning the channel
system. In this paper we will show that a reallocation
of the control unit logic may triple the channel
subsystem's capacity, while still using the same disk
drives. The performance gain is achieved by adding
control-unit like intelligence and local buffer memory
to each disk drive, creating a Local Disk Controller
(LDC), and thus eliminating the performance degradation
caused by reconnect failures at a high channel
utilization. The system proposed remains fully software
compatible with the current 370-XA architecture. A
simpler approach, requiring only a slight modification
to the disk drives, is also discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:1985:MCC,
author = "Philip S. Yu and Daniel M. Dias and John T. Robinson
and Balakrishna R. Iyer and Douglas Cornell",
title = "Modelling of centralized concurrency control in a
multi-system environment",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "183--191",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317823",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of multiple systems sharing a common
data base is analyzed for an architecture with
concurrency control using a centralized lock engine.
The workload is based on traces from large mainframe
systems running IBM's IMS database management system.
Based on IMS lock traces, the lock contention
probability and data base buffer invalidation effect in
a multi-system environment are predicted. Workload
parameters are generated for use in event-driven
simulation models that examine the overall performance
of multi-system data sharing, and to determine the
performance impact of various system parameters and
design alternatives. While performance results are
presented for realistic system parameters, the emphasis
is on the methodology, approximate analysis technique
and on examining the factors that affect multi-system
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:1985:ASO,
author = "Alexander Thomasian and In Kyung Ryu",
title = "Analysis of some optimistic concurrency control
schemes based on certification",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "192--203",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Optimistic Concurrency Control-OCC schemes based on
certification are analyzed in this paper. We allow two
types of data access schemes referred to as static and
dynamic. According to the first (second) scheme a
transaction reads all the required data items at the
beginning of its processing (on demand during its
processing), respectively. After completing its
processing, each transaction is checked as to whether
it has encountered a data conflict. Validated
transactions commit; otherwise, they are restarted. A
variant of the regular (silent) commit scheme where a
committing transaction notifies conflicted transactions
to restart immediately (broadcast commit scheme) is
also considered. We use an iterative method to analyze
the performance of OCC schemes in the framework of a
system with a fixed number of transactions in multiple
classes with given probabilities for their occurrence.
The iterative method is validated against simulation
and shown to be highly accurate even for high data
contention. We present graphs/tables, which are used to
determine how system performance is affected by: (i)
various OCC schemes, (ii) transaction size, i.e.,
number of data items accessed, (iii) number of
transactions, (iv) the distribution of transaction
processing time requirements, (v) the throughput
characteristic of the system, and (vi) granule
placement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
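Annotator's note: a minimal sketch of the certification test that underlies such OCC schemes, assuming read and write sets are known at commit time; the static/dynamic access variants and the iterative analytic model of the paper are not reproduced.

# Sketch of OCC certification: a transaction validates only if nothing it
# read was written by a transaction that committed while it was running.

def certify(read_set, committed_write_sets_during_run):
    return all(read_set.isdisjoint(ws) for ws in committed_write_sets_during_run)

if __name__ == "__main__":
    print(certify({"a", "b"}, [{"c"}, {"d"}]))   # True: no conflict, commit
    print(certify({"a", "b"}, [{"b", "e"}]))     # False: conflict, restart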
@Article{Ryu:1985:RPA,
author = "In Kyung Ryu",
title = "Review of {'OS 1100-of performance algorithms: a guide
to the resource allocation algorithms of OS-1100'} by
{John C. Kelly}",
journal = j-SIGMETRICS,
volume = "13",
number = "3--4",
pages = "9--9",
month = nov,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041844.1041845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The book describes the algorithms which were used by
OS-1100 to manage the resources of Sperry 1100 computer
systems, and lists the parameters that may affect the
performance of OS-1100. However, the book fails to
show the reader how the algorithms and the
parameters affect the performance of OS-1100. It is not
clear to the reader why the algorithm in OS-1100 was
selected and how to tune the parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perros:1985:AMF,
author = "H. G. Perros and D. Mirchandani",
title = "An analytic model of a file server for bulk file
transfers",
journal = j-SIGMETRICS,
volume = "13",
number = "3--4",
pages = "14--22",
month = nov,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041844.1041846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An analytic model of a file server is presented. The
file server was an experimental system designed to
provide an environment for storage and retrieval of
bulk files. The file server was envisaged to be
accessed by single-user workstations, equipped with
limited secondary storage, via a local area network.
The analytic model is a hierarchical model involving an
open/closed queueing network of the BCMP type and an
open queueing network with blocking. These two models
were combined together through the means of an
iterative scheme. The results obtained from the
analytic model were in close agreement with simulation
data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Domanski:1985:BIS,
author = "Bernard Domanski",
title = "Building {IMS} synthetic workloads",
journal = j-SIGMETRICS,
volume = "13",
number = "3--4",
pages = "23--28",
month = nov,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041844.1041847",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Historically, workload characterization, and cluster
analysis in particular, has been a proven technique
when applied to performance evaluation / capacity
planning studies. Given the problem of constructing a
synthetic workload that represents a production
workload, our goal is to use this technique to identify
a {\em concise}, yet accurate set of work units that
will compose the workload. For IMS, these work units
are transactions. Yet the selection of transactions
must be done with care; for an additional goal must be
to identify a {\em concise}, yet accurate set of
databases that are required by the transactions. This
paper will review clustering techniques, and apply them
to drive the transaction selection process. An
algorithm is also presented that identifies the
technique behind database selection. A case study
follows that illustrates the implementation of the
methodology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buzen:1986:MST,
author = "Jeffrey P. Buzen",
title = "Modeling {I/O} subsystems (tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "1--1",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317532",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial will present techniques for modeling the
performance of I/O subsystems that incorporate
channels, control units, string controllers and direct
access devices. The presentation will focus on the
general principles involved in analyses of this type,
and will explore the strengths and weaknesses of
alternative assumptions. Attendees should gain an
overall understanding of basic analysis procedures so
they can deal with alternative I/O architectures that
are not treated explicitly in the presentation. The
material in this tutorial is mathematically oriented,
and attendees should have some familiarity with basic
queueing theory. However, the presentation is almost
entirely self contained, and all important concepts and
equations will be fully explained. Operational analysis
will be used throughout to simplify the derivation of
major results and clarify the assumptions required at
each stage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferrari:1986:WCT,
author = "Domenico Ferrari",
title = "Workload characterization (tutorial): issues and
approaches",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "1--1",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Workload characterization is that branch of
performance evaluation which concerns itself with the
measurement and modeling of the workloads to be
processed by the system being evaluated. Since all
performance indices of interest are workload-dependent,
there is no evaluation study that does not require the
characterization of one or more workloads. In spite of
the importance of the problem, our knowledge in this
area leaves much to be desired. The tutorial addresses
the main issues, both resolved and unresolved, in the
field, and surveys the major approaches that have been
proposed and are in use. Modern methods for designing
executable artificial workloads, as well as the
applications of these techniques in system procurement,
system tuning, and capacity planning are emphasized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goel:1986:SRM,
author = "Amrit L. Goel",
title = "Software reliability modeling (tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "2--2",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317901",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are a number of views as to what software
reliability is and how it should be quantified. Some
people believe that this measure should be binary in
nature so that an imperfect program would have zero
reliability while a perfect one would have a
reliability value of one. This view parallels that of
program proving whereby the program is either correct
or incorrect. Others, however, feel that software
reliability should be defined as the relative frequency
of the times that the program works as intended by the
user. This view is similar to that taken in testing
where a percentage of the successful runs is used as a
measure of program quality. According to the latter
viewpoint, software reliability is a probabilistic
measure and can be defined as follows: Let $F$ be a
class of faults, defined arbitrarily, and $T$ be a
measure of relevant time, the units of which are
dictated by the application at hand. Then the
reliability of the software package with respect to the
class of faults $F$ and with respect to the metric $T$,
is the probability that no fault of the class occurs
during the execution of the program for a prespecified
period of relevant time. A number of models have been
proposed during the past fifteen years to estimate
software reliability and several other performance
measures. These are based mainly on the failure history
of software and can be classified according to the
nature of the failure process studied as indicated
below. Times Between Failures Models: In this class of
models the process under study is the time between
failures. The most common approach is to assume that
the time between, say, the $ (i - 1)$ st and $i$ th
failures, follows a distribution whose parameters
depend on the number of faults remaining in the program
during this interval. Failure Count Models: The
interest of this class of models is in the number of
faults or failures in specified time intervals rather
than in times between failures. The failure counts are
assumed to follow a known stochastic process with a
time dependent discrete or continuous failure rate.
Fault Seeding Models: The basic approach in this class
of models is to ``seed'' a known number of faults in a
program which is assumed to have an unknown number of
indigenous faults. Input Domain Based Models: The basic
approach taken here is to generate a set of test cases
from an input distribution which is assumed to be
representative of the operational usage of the program.
Because of the difficulty in obtaining this
distribution, the input domain is partitioned into a
set of equivalence classes, each of which is usually
associated with a program path. In this tutorial we
discuss the key models from the above classes and the
related issues of parametric estimation, unification of
models, Bayesian interpretation, validation and
comparison of models, and determination of optimum
release time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
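Annotator's note: as a concrete instance of the ``failure count'' class mentioned in the abstract, a hedged sketch of the well-known Goel--Okumoto NHPP model; the parameter values are illustrative only.

import math

# Goel-Okumoto NHPP sketch: m(t) = a * (1 - exp(-b t)) is the expected
# cumulative number of failures by time t; reliability over (t, t + x] is
# the probability of no failure in that interval. a and b are made up here.

def mean_failures(t, a, b):
    return a * (1.0 - math.exp(-b * t))

def reliability(x, t, a, b):
    # P(no failure in (t, t + x] | testing up to time t)
    return math.exp(-(mean_failures(t + x, a, b) - mean_failures(t, a, b)))

if __name__ == "__main__":
    a, b = 100.0, 0.02                     # illustrative parameters
    print(mean_failures(50.0, a, b))       # expected failures by t = 50
    print(reliability(10.0, 50.0, a, b))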
@Article{Hedlund:1986:PMI,
author = "Kye Hedlund",
title = "Performance modeling in integrated circuit design
(tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "2--2",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial is an introduction to performance
modeling in the design of integrated circuits (ICs). It
assumes no background in either electrical engineering
or VLSI design; all relevant concepts and terminology
will be introduced. The goal is to give an overview of
the role of performance modeling in IC design, the
current state of the art, central problems and research
challenges. First, the process of IC design will be
reviewed. Every design progresses through a series of
stages: concept, architecture, implementation and
realization. Each level of design manipulates different
abstractions and hence is concerned with different
measures of design quality. Some principal measures
are: speed, silicon area, power consumption and the
number of input/output connections. There are several
different major design paradigms such as gate array,
standard cell and custom design. Each results in
different tradeoffs between flexibility, ease of
implementation and design quality. This has a
fundamental impact on both the design process and the
resulting design. Performance considerations enter into
IC design at a variety of levels: device, circuit,
logic design and architecture. Each requires different
performance models, and the designer must make
tradeoffs that are qualitatively different at different
levels. Circuit level design requires fast and accurate
models of logic gate behavior. A circuit's speed,
silicon area and power consumption must be accurately
estimated. Each of these circuit characteristics can be
traded off against the others, and the designer may
adjust the tradeoff in order to tune the circuit to the
needs of a particular application. Accurate and
computationally fast models form the basis for the
tools that assist the designer in circuit optimization.
Tools exist that accurately predict circuit performance
and that automatically optimize circuits. Integrated
circuit design is a field still in its infancy. This,
coupled with the fact that the underlying technological
base has undergone rapid change in recent years, means
that performance modeling of IC design is still in its
formative stages. Some areas (e.g. device modeling) are
more mature and better understood than others (e.g.
architectural modeling). Research opportunities are
plentiful.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Artis:1986:ESP,
author = "H. Pat Artis",
title = "Expert systems for performance analysis (tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "3--3",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A great portion of the formal practice called computer
performance evaluation is the application of rules of
thumb and proceduralized analysis of model results,
specific reports, and data elements based on the
experience and knowledge of the practitioner. Expert
systems provide a technique to support the analyst in
such mundane analyses and allow them to study more
complex problems that cannot easily be proceduralized.
Rather than replacing performance analysts, expert
systems provide an opportunity to increase their
productivity. The tutorial focuses on a discussion of
the fundamental building blocks of expert systems:
vocabularies, rules, and policies. A familiar example
is used to illustrate using expert systems for analysis
of performance results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tripathi:1986:PIL,
author = "Satish K. Tripathi",
title = "Performance issues in local area networks (tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "3--3",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial addresses performance problems in Local
Area Networks (LAN). User level performance measures
are affected both by software and by
communication bottlenecks. Techniques for modeling the
key components of the performance of a LAN will be
presented. Models will be presented to discuss the
throughput and response time characteristics of LANs.
We also present some measurement data obtained from a
LAN performance experiment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stone:1986:FC,
author = "Harold S. Stone and Dominique Thi{\'e}baut",
title = "Footprints in the cache",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "4--8",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317533",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper develops an analytical model for a
cache-reload transient. When an interrupt program or
system program runs periodically in a cache-based
computer, a short cache-reload transient occurs each
time the interrupt program is invoked. That transient
depends on the size of the cache, the fraction of the
cache used by the interrupt program, and the fraction
of the cache used by background programs that run
between interrupts. We call the portion of a cache used
by a program its footprint in the cache, and we show
that the reload transient is related to the area in the
tail of a normal distribution whose mean is a function
of the footprints of the programs that compete for the
cache. We believe that the model may be useful as well
for predicting paging behavior in virtual-memory
systems with round-robin scheduling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
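Annotator's note: a toy occupancy calculation in the spirit of the footprint idea (not the normal-tail model derived in the paper), assuming the background program's blocks map uniformly at random into a direct-mapped cache; all sizes below are made up.

# Toy estimate of the cache-reload transient: if a background program touches
# f_bg blocks that map uniformly at random into a cache of c lines, each line
# of the interrupt program's footprint f_int is displaced with probability
# roughly 1 - (1 - 1/c)**f_bg before the next interrupt.

def expected_reload_misses(f_int, f_bg, c):
    p_displaced = 1.0 - (1.0 - 1.0 / c) ** f_bg
    return f_int * p_displaced

if __name__ == "__main__":
    # Illustrative: 4096-line cache, 512-line interrupt footprint,
    # 2048-line background footprint.
    print(expected_reload_misses(512, 2048, 4096))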
@Article{Vernon:1986:PAM,
author = "Mary K. Vernon and Mark A. Holliday",
title = "Performance analysis of multiprocessor cache
consistency protocols using generalized timed {Petri}
nets",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "9--17",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We use an exact analytical technique, based on
Generalized Timed Petri Nets (GTPNs), to study the
performance of shared bus cache consistency protocols
for multiprocessors. We develop a general framework
within which the key characteristics of the Write-Once
protocol and four enhancements that have been combined
in various ways in the literature can be identified and
evaluated. We then quantitatively assess the
performance gains for each of the four enhancements. We
consider three levels of data sharing in our workload
models. One of the enhancements substantially improves
system performance in all cases. Two enhancements are
shown to have negligible effect over the range of
workloads analyzed. The fourth enhancement shows a
small improvement for low levels of sharing, but shows
more substantial improvement as sharing is increased,
if we assume a ``good access pattern''. The effects of
two architectural parameters, the blocksize and the
main memory cycle time are also considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harrison:1986:PMP,
author = "P. G. Harrison and A. J. Field",
title = "Performance modelling of parallel computer
architectures",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "18--27",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317535",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe two types of complex server
aggregations which can be used to model collections of
components in certain types of parallel computer
systems and give a case study showing how the
aggregations may be applied in practice. Analytical
models of such systems are becoming increasingly
important as a means of guiding the often complex
design processes, particularly since recent
developments in VLSI technology now make it possible to
fabricate many paper-designs hitherto impractical for
reasons of cost. We argue that aggregations of the type
described are essential in the modelling of parallel
systems; using the proposed techniques, large numbers
of components can be modelled as queue-length-dependent
servers within a queueing network in which the number
of servers is the same as the number of distinct types
of processing element in the system being modelled.
Because the number of servers in the model is fixed, i.e.,
is independent of the number of processors, very large
multiprocessor systems can be modelled efficiently with
no explosion in the size of the state space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Madnick:1986:MMC,
author = "Stuart Madnick and Y. Richard Wang",
title = "Modeling multiprocessor computer systems with
unbalanced flows",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "28--34",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A performance analysis methodology using certain
aspects of queueing theory to evaluate computer system
speed performance is presented. This methodology
specifically focuses on modeling multiprocessor
computer systems with unbalanced flows (i.e., number of
transactions leaving a server is not the same as number
of transactions entering that server) due to
asynchronously spawned parallel tasks. This unbalanced
flow phenomenon, which has a significant effect on
performance, cannot be solved analytically by classical
queueing network models. A decomposition method is
applied to decompose the unbalanced flows. Formulae for
open queueing networks with unbalanced flows due to
asynchronously spawned tasks are developed.
Furthermore, an algorithm based on Buzen's convolution
algorithm is developed to test the necessary and
sufficient condition for closed system stability as
well as to compute performance measures. An average of
less than four iterations is reported for convergence
with this algorithm. A study of the INFOPLEX
multiprocessor data storage hierarchy, comparing this
rapid solution algorithm with simulations, has shown
highly consistent results. A cost effective software
tool, using this methodology, has been developed to
analyze an architectural design, such as INFOPLEX, and
to produce measures such as throughput, utilization,
and response time so that potential performance
problems can be identified.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
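Annotator's note: since the abstract builds on Buzen's convolution algorithm, here is a minimal sketch of that classical algorithm for a closed, single-class product-form network with load-independent stations; the unbalanced-flow extension developed in the paper is not shown, and the demands are illustrative.

# Buzen's convolution algorithm: g[n] accumulates the normalization constant
# G(n) as each station's demand is folded in; throughput is G(N-1)/G(N).

def buzen(demands, n_customers):
    # demands[k] = visit ratio * mean service time at station k.
    g = [1.0] + [0.0] * n_customers
    for d in demands:
        for n in range(1, n_customers + 1):
            g[n] = g[n] + d * g[n - 1]        # fold station into G(n)
    return g

if __name__ == "__main__":
    demands = [0.05, 0.04, 0.02]              # illustrative service demands
    g = buzen(demands, 10)
    throughput = g[9] / g[10]                 # X(N) = G(N-1) / G(N)
    print(throughput, [d * throughput for d in demands])  # utilizations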
@Article{Kleeman:1986:APB,
author = "Lindsay Kleeman and Antonio Cantoni",
title = "The analysis and performance of batching arbiters",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "35--43",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A class of arbiters, known as batching arbiters, is
introduced and defined. A particularly simple
decentralised example of a batching arbiter is
described, with motivation given for the batching
arbiter model adopted. It is shown that under
reasonable assumptions, batching arbiters can be
described by a finite state Markov chain. The key steps
in the analysis of the arbiter performance are the
method of assigning states, evaluation of state
transition probabilities and showing that the Markov
chain is irreducible. Arbiter performance parameters
are defined, such as proportion of time allocated to
each requester and mean waiting time for each
requester. Apart from results describing the steady
state behavior of the arbiter for general system
parameters, a number of limiting results are also
obtained corresponding to light and heavy request
loading. Finally, numerical results of practical
interest are presented, showing the performance
parameters of the arbiter versus request rates for
various configurations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lehoczky:1986:PRT,
author = "John P. Lehoczky and Lui Sha",
title = "Performance of real-time bus scheduling algorithms",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "44--53",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When periodic tasks with hard deadlines communicate
over a bus, the problem of hard real-time bus
scheduling arises. This paper addresses several
problems of hard real-time bus scheduling, including
the evaluation of scheduling algorithms and the issues
of message packet pacing, preemption, priority
granularity and buffering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Leland:1986:LBH,
author = "Will Leland and Teunis J. Ott",
title = "Load-balancing heuristics and process behavior",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "54--69",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic load balancing in a system of loosely-coupled
homogeneous processors may employ both judicious
initial placement of processes and migration of
existing processes to processors with fewer resident
processes. In order to predict the possible benefits of
these dynamic assignment techniques, we analyzed the
behavior (CPU, disk, and memory use) of 9.5 million
Unix* processes during normal use. The observed process
behavior was then used to drive simulation studies of
particular dynamic assignment heuristics.\par
Let $ F(\cdot) $ be the probability distribution of the
amount of CPU time used by an arbitrary process. In the
environment studied we found:\par
$ \bullet $ $ (1 - F(x)) \approx r x^{-c} $, $ 1.05 < c <
1.25 $;\par
$ \bullet $ $ F(\cdot) $ is far enough from exponential
to make exponential models of little use.\par
$ \bullet $ With a foreground-background process
scheduling policy in each processor, simple heuristics
for initial placement and processor migration can
significantly improve the response ratios of processes
that demand exceptional amounts of CPU, without harming
the response ratios of ordinary processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
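Annotator's note: a hedged sketch of what the reported tail 1 - F(x) ~ r x^{-c} implies: sampling CPU demands from a Pareto-type distribution with c in the observed range and measuring how much of the total CPU the largest processes account for. The minimum demand and c below are illustrative, not the paper's trace data or simulator.

import random

# Inverse-CDF sampling of a Pareto tail: F(x) = 1 - (x_min / x)**c.

def sample_cpu_demand(c=1.1, x_min=1.0):
    u = random.random()
    return x_min * u ** (-1.0 / c)

if __name__ == "__main__":
    random.seed(1)
    demands = sorted(sample_cpu_demand() for _ in range(100_000))
    top_1_percent = sum(demands[-1000:])
    print(top_1_percent / sum(demands))  # fraction of CPU used by the top 1%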
@Article{Lee:1986:CPB,
author = "Kyoo Jeong Lee and Don Towsley",
title = "A comparison of priority-based decentralized load
balancing policies",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "70--77",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317540",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Load balancing policies in distributed systems divide
jobs into two classes: those processed at their site of
origination (local jobs) and those processed at some
other site in the system after being transferred
through a communication network (remote jobs). This
paper considers a class of decentralized load balancing
policies that use a threshold on the local job queue
length at each host in making decisions for remote
processing. They differ from each other according to
how they assign priorities to each of these job
classes, ranging from one providing favorable treatment
to local jobs to one providing favorable treatment to
remote jobs. Under each policy, the optimal load
balancing problem is formulated as an optimization
problem with respect to the threshold parameter. The
optimal threshold is obtained numerically using
matrix-geometric formulation and an iteration method.
Last, we consider the effects that the job arrival
process can have on performance. One expects that load
balancing for systems operating in an environment of
bursty job arrivals should be more beneficial than for
an environment with random job arrivals. This fact is
observed through numerical examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{LeBoudec:1986:BEM,
author = "Jean-Yves {Le Boudec}",
title = "A {BCMP} extension to multiserver stations with
concurrent classes of customers",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "78--91",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a multiclass service station with $B$
identical exponential servers, with constant service
rate $ \mu $. At a station, the classes of customers
are sorted into $M$ concurrent groups; the discipline
of service is on a first come first served basis, but
two customers of the same group cannot be served
simultaneously. We show that product form is maintained
when such stations are inserted in BCMP networks, and
give closed form expressions for the steady-state
probabilities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Conway:1986:EAS,
author = "A. E. Conway and N. D. Georganas",
title = "An efficient algorithm for semi-homogeneous queueing
network models",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "92--99",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The class of product-form semi-homogeneous queueing
networks is introduced as a generalization of the class
of homogeneous networks, which has been considered by
Balbo et al for the performance modeling of local area
networks. In semi-homogeneous networks, the relative
traffic intensity at the various shared resources may
depend on the routing chain to which a customer
belongs. We develop an efficient algorithm for the
exact analysis of this class of networks. It is based
on the equations which form the foundation of RECAL, a
general purpose exact algorithm for multiple-chain
closed queueing networks. The complexity of the
algorithm is shown to be of order less than exponential
in $ (P - 1)^{1 / 2} $, where $P$ is the number of
processors (workstations) in the network. It is
therefore, in general, more efficient than a direct
application of either convolution, MVA or RECAL to the
class of semi-homogeneous queueing networks. The
algorithm presented here may be situated between the
algorithms of Balbo et al and the general purpose
algorithms, both in terms of its generality and
efficiency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nain:1986:OMH,
author = "Philippe Nain and Keith Ross",
title = "Optimal multiplexing of heterogeneous traffic with
hard constraint",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "100--108",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Considered are optimal dynamic policies for
multiplexing $ K + 1 $ heterogeneous traffic types onto
a single communication channel. The packet types arrive
to the channel according to independent Poisson
processes. The service requirements are exponential
with type dependent means. The optimization criterion
is to minimize a linear combination of the average
delays for packet types 1 to $K$, while simultaneously
subjecting the average delay of type-0 packets to a
hard constraint. The optimal multiplexing policy is
shown to be a randomized modification of the ``$ \mu c$
rule''. The optimization problem is thereby reduced to
a problem of finding the optimal randomization factor;
an algorithm, which can be implemented in real time, is
given to do this for two particular cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
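Annotator's note: for orientation, a sketch of the plain ``mu-c rule'' that the optimal policy modifies: among the non-empty queues, serve the class with the largest product of service rate and delay cost. The randomization and the hard constraint handled in the paper are not reproduced.

# Plain mu-c rule: pick the non-empty class maximizing mu[i] * c[i].

def mu_c_rule(queue_lengths, mu, c):
    candidates = [i for i, q in enumerate(queue_lengths) if q > 0]
    if not candidates:
        return None                              # nothing to serve
    return max(candidates, key=lambda i: mu[i] * c[i])

if __name__ == "__main__":
    print(mu_c_rule([3, 0, 5], mu=[1.0, 2.0, 0.5], c=[2.0, 1.0, 3.0]))  # -> 0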
@Article{Sevcik:1986:CTP,
author = "Kenneth Sevcik and Marjory J. Johnson",
title = "Cycle time properties of the {FDDI} token ring
protocol (extended abstract)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "109--110",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Communication technology now makes it possible to
support high data transmission rates at relatively low
cost. In particular, optical fiber can be used as the
medium in local area networks with data rates in the
range of 100 megabits per second. Unfortunately, local
area network topologies and communication protocols
that work well with lower speed media are not
necessarily appropriate when the data transmission rate
is scaled up by approximately an order of magnitude.
Recognizing this fact, an ANSI sub-committee (ANSI X3T9)
has been working for the past two years on a proposed
standard for a token ring protocol tailored to a
transmission medium with transmission rate in the 100
megabits per second range. The protocol is referred to
as the FDDI (Fiber Distributed Data Interface) Token
Ring protocol. The proposal for the standard is now
quite mature and nearly stable. While numerous analyses
of the performance of token ring protocols have been
carried out and described in the literature, these have
for the most part dealt with protocol variations of
less complexity than FDDI. The major feature that
distinguishes FDDI from token ring protocols that have
been analyzed previously is the concept of a ``timed
token'', which selectively allocates the right to
transmit data among the stations depending in part on
how rapidly the token progressed around the ring on the
previous cycle. A station is allowed to transmit
certain types of data only if the token's last cycle
has been shorter than a ``target'' token rotation time.
This feature makes it possible to give guaranteed
response to time-critical messages. The ``timed token''
creates some dependencies among transmissions at
various stations, however, and these dependencies
complicate the analysis of the protocol's performance.
The basic ideas of the timed token protocol on which
the FDDI protocol is based were first presented by Grow
[``A Timed-Token Protocol for Local Area Networks'',
Electro `82, 1982]. He distinguished two types of
traffic. Synchronous traffic is a type of traffic that
has delivery time constraints. Examples include voice
and video transmissions, where delays in transmission
can result in disruptions of the sound or picture
signal. Asynchronous traffic has no such time
constraints, or at least the time constraints are
measured in units that are large relative to the token
cycle time. Here is a brief overview of how the ``timed
token'' protocol works. The stations on the local area
network choose, in a distributed fashion, a target
token rotation time (TTRT). Basically, the TTRT is
chosen to be sufficiently small that requirements for
responsiveness at every station will be met. The right
to use network bandwidth for transmission of
synchronous traffic is allocated among the stations in
a manner that guarantees that network capacity is not
exceeded. The token is then forced by the protocol to
circulate with sufficient speed that all stations
receive their allocated fractions of capacity for
synchronous traffic. This is done by conditioning the
right to transmit asynchronous messages on the fact
that the token has rotated sufficiently fast that it is
``ahead of schedule'' in delivering synchronous
allocations to the stations. In essence, the TTRT value
dictates a departure schedule for the token to pass
from station to station, and asynchronous traffic can
be transmitted only when doing so does not cause that
schedule to be broken. Subsequently, Ulm [``A Timed Token
Ring Local Area Network and Its Performance
Characteristics'', Proc. of Conf. on Local Area
Networks, IEEE, 1982] analyzed the protocol described
by Grow and determined its sensitivity to various
parameters. He considered the effect of overheads and
provided a number of graphs indicating the impact of
various parameters on maximum transmission capacity. As
well as describing the timed token protocol, Grow and
Ulm included intuitive arguments supporting two
fundamental properties of (a somewhat idealized version
of) the protocol. These two properties are: The average
token cycle time in the absence of failures is at most
the TTRT. The maximum token cycle time in the absence
of failures is at most twice the TTRT. Both these
properties are important to the successful operation of
the protocol. The first one guarantees that the average
long run bandwidth provided to each station is at least
its allocated fraction of the network's capacity. The
second property guarantees that, in the absence of
component failures, the time between a station's
successive opportunities to transmit synchronous
traffic will never exceed twice the target token
rotation time. While Grow and Ulm assert that these
properties hold for the timed-token protocol, neither
formal proofs nor references are provided. Because the
FDDI protocol is based on the same timed-token
protocol, subsequent publications specifically
describing the FDDI protocol have also claimed that the
two properties hold. In this paper, we prove both
properties using a common notational framework. We
first treat an idealized situation in which several
types of overhead are ignored. We actually study a
protocol that is slightly more liberal than the FDDI
proposed standard in that it allows asynchronous
transmission more often because ``lateness'' is not
carried forward from cycle to cycle. The protocol
variation, which still guarantees properties (1) and
(2), is at least as easily implemented as the original
version. Also, it guarantees sufficient responsiveness
and capacity for the transmission of synchronous
traffic, while providing improved responsiveness to
asynchronous transmissions. When overheads are
considered, it is found that the proposed standard FDDI
protocol satisfies the constraint on average token
rotation time (relying on the retention of ``lateness''
from cycle to cycle), but not the one on maximum cycle
time. We analyze a variation of the protocol that
ignores accumulated lateness, but accounts for the
various overhead sources. The advantages of the new
rule include: It guarantees both desired properties
without having to retain ``lateness'' from one cycle to
the next. It provides better service to asynchronous
requests in the case where the amount of overhead is
small relative to the token rotation time. (When the
amount of overhead is large, the original proposed
protocol may have token rotation times significantly in
excess of twice the TTRT.) It is easier to implement.
Work is underway on the task of quantifying the
performance of the FDDI protocol by determining
estimates of, or tighter bounds on, the average token
rotation time and on the average delivery time of a
submitted message. The properties established in this
paper are required to form the basis of the
quantitative analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
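Annotator's note: a hedged sketch of the timed-token idea described above, in the simplified variant that does not carry ``lateness'' forward: synchronous transmission is always allowed up to the station's allocation, and asynchronous transmission only while the token is ahead of schedule. All values are illustrative; this is not the FDDI state machine.

# Timed-token sketch: on each token arrival, measure the rotation time and
# grant asynchronous budget only if the token arrived earlier than the TTRT.

class TimedTokenStation:
    def __init__(self, ttrt, sync_allocation):
        self.ttrt = ttrt                        # target token rotation time
        self.sync_allocation = sync_allocation  # guaranteed synchronous share
        self.last_token_arrival = 0.0

    def on_token(self, now):
        rotation_time = now - self.last_token_arrival
        self.last_token_arrival = now
        sync_budget = self.sync_allocation      # always permitted
        async_budget = max(0.0, self.ttrt - rotation_time)
        return sync_budget, async_budget

if __name__ == "__main__":
    s = TimedTokenStation(ttrt=8.0, sync_allocation=1.0)
    s.on_token(0.0)
    print(s.on_token(5.0))    # early token: asynchronous budget 3.0
    print(s.on_token(14.0))   # late token: asynchronous budget 0.0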
@Article{Dallery:1986:ADP,
author = "Yves Dallery and Rajan Suri",
title = "Approximate disaggregation and performance bounds for
queueing networks with multiple-server stations",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "111--128",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce the concept of approximate disaggregation
which enables us to replace a station by a subnetwork,
i.e. a set of stations, such that the performance of
the derived network is close to the performance of the
initial network. We use this concept to disaggregate
any multiple-server station into a set of single-server
stations. Using two different disaggregations, we are
able to bound the performance of the initial network by
the performance of a ``lower'' and an ``upper'' network
each consisting of single-server stations, whose
performance can in turn be bounded by the Balanced Job
Bounds (or other known bounds). Several examples show
the useful information provided by these bounds at a
very low cost: for $K$ stations and $N$ customers, the
computational complexity here is $ \Omega (K)$ which is
significantly less than the $ \Omega (K N^2)$
operations required for exact solution. Indeed, despite
the multiple server stations, the computational
complexity of our bounds is the same as that of
Balanced Job Bounds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate disaggregation; closed queueing networks;
performance bounds; product form networks",
}
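The abstract above bounds the ``lower'' and ``upper'' single-server networks by the
Balanced Job Bounds. As a point of reference, here is a minimal Python sketch of the
classical Balanced Job Bounds on throughput for a closed, single-class product-form
network with single-server stations and no think time (the demand values are
hypothetical); it is not the disaggregation step itself.

    def balanced_job_bounds(demands, n_jobs):
        # demands: per-station service demands D_k; n_jobs: population N.
        # Returns (lower, upper) bounds on system throughput X(N).
        D = sum(demands)
        d_max = max(demands)
        d_avg = D / len(demands)
        lower = n_jobs / (D + (n_jobs - 1) * d_max)
        upper = min(1.0 / d_max, n_jobs / (D + (n_jobs - 1) * d_avg))
        return lower, upper

    print(balanced_job_bounds([0.2, 0.3, 0.5], 10))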
@Article{Strelen:1986:GMV,
author = "Johann Strelen",
title = "A generalization of mean value analysis to higher
moments: moment analysis",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "129--140",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Closed product-form queueing networks are considered.
Recursive schemata are proposed for the higher moments
of the number of customers in the queues, called
``moment analysis''. As with mean value analysis (MVA),
in general no state probabilities are needed.
Approximation techniques for these schemata similar to
those existing for MVA are introduced.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
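For readers who want the base case that the moment-analysis recursion generalizes,
the sketch below implements standard exact Mean Value Analysis for a closed,
single-class product-form network of single-server stations (the demands and
population are hypothetical). The paper's contribution is an analogous recursion
for higher moments of the queue lengths, which is not reproduced here.

    def exact_mva(demands, n_jobs, think_time=0.0):
        # Standard exact MVA: returns throughput X(N) and mean queue lengths Q_k(N).
        K = len(demands)
        q = [0.0] * K                                          # Q_k(0) = 0
        x = 0.0
        for n in range(1, n_jobs + 1):
            r = [demands[k] * (1.0 + q[k]) for k in range(K)]  # residence times
            x = n / (think_time + sum(r))                      # system throughput
            q = [x * r[k] for k in range(K)]                   # Little's law per station
        return x, q

    print(exact_mva([0.2, 0.3, 0.5], 10))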
@Article{Massey:1986:PAD,
author = "William A. Massey",
title = "A probabilistic analysis of a database system",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "141--146",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In Gray, Homan, Obermarck, and Korth [GHOK], the
authors give many conjectures based on simulation for
the probabilistic analysis of transaction lock-waits
and transaction deadlocks. In this paper, we introduce
a probabilistic model to explain their observations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Witkowski:1986:PEM,
author = "Andrew Witkowski",
title = "Performance evaluation of multiversion with the
{Oracle} synchronization",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "147--158",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a new analytical model for
performance measurements of timestamp driven databases.
The model is based on two-dimensional Poisson processes
where one coordinate represents the real arrival time
and the other the timestamp of an arriving message.
The notion of preemption is defined which serves as a
model for synchronization. Preemption naturally implies
such performance measures as response time and amount
of abortion in the system. The concept of oracle is
introduced which allows evaluation of a lower bound on
the synchronization cost. Preemption and the oracle are
then used to evaluate performance of the Multiversion
synchronization. We present the distribution and the
expectation of the synchronization cost. The analysis
is then applied to a database with exponential
communication delays ($ \alpha $) and the intensity of
transaction $ \lambda $. It is shown that for
Multiversion, this cost depends linearly on $ 1 /
\alpha $ and logarithmically on $ \lambda $.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Swinghal:1986:PAA,
author = "Mukesh Swinghal and A. K. Agrawala",
title = "Performance analysis of an algorithm for concurrency
control in replicated database systems",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "159--169",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317549",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we analyze the performance of a
concurrency control algorithm for replicated database
systems. We present a model of a distributed database
system which provides a framework to study the
performance of different concurrency control
algorithms. We discuss performance criteria to evaluate
different algorithms. We use the model to analyze the
performance of an algorithm for concurrency control in
replicated database systems. The technique used in
analysis is iterative and approximate. We plot a set of
performance measures for several values of the model
parameters. The results of analysis are compared
against a simulation study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate solutions; error analysis; mean value
analysis; moment analysis; multiclass queueing
networks; product-form solutions",
}
@Article{Haikala:1986:AMP,
author = "Ilkka Haikala",
title = "{ARMA} models of program behaviour",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "170--179",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In models of virtual memory computer systems, it is
generally assumed that the time intervals between the
page (or segment) faults, often called lifetimes, are
independent of each other. Due to the
phase-transition behaviour in many real programs this
is not always true, and strong correlations may exist
between successive lifetimes. These correlations may
have a notable effect on the system behaviour. This
paper describes a series of experiments where
autoregressive-moving average (ARMA) models are used to
describe the correlation structure in sequences of
lifetimes. It is shown that many real program
executions can be described with models having four
parameters only, i.e. with the ARMA(1,1) models. The
models can be used as parts of simulation models, for
instance, and they also give a better understanding of
program behaviour in general.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
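To make the four-parameter ARMA(1,1) model concrete, the sketch below generates a
synthetic lifetime sequence from such a model (the mean, autoregressive coefficient,
moving-average coefficient and noise standard deviation are all hypothetical values);
it only illustrates the model form, not the fitting procedure used in the paper.

    import numpy as np

    def arma11_lifetimes(n, mean, phi, theta, sigma, seed=0):
        # ARMA(1,1): (x_t - mean) = phi*(x_{t-1} - mean) + e_t + theta*e_{t-1}
        rng = np.random.default_rng(seed)
        e = rng.normal(0.0, sigma, n + 1)
        x = np.empty(n)
        prev = 0.0
        for t in range(n):
            prev = phi * prev + e[t + 1] + theta * e[t]
            x[t] = mean + prev
        return np.clip(x, 0.0, None)        # lifetimes cannot be negative

    sample = arma11_lifetimes(1000, mean=50.0, phi=0.7, theta=-0.3, sigma=10.0)
    print(sample[:5], sample.mean())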
@Article{Majumdar:1986:MAL,
author = "Shikharesh Majumdar and Richard B. Bunt",
title = "Measurement and analysis of locality phases in file
referencing behaviour",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "180--192",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent research has demonstrated the existence of
locality in short-term file referencing behaviour. A
detailed study of the dynamic characteristics of file
referencing is presented in this paper. The concept of
Bounded Locality Intervals from the field of program
behaviour has been used to model the locality phases of
file referencing behaviour. The model is found to be
powerful both from a descriptive point of view and from
the perspective of understanding the performance
implications of locality properties of file referencing
behaviour on file system management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Razouk:1986:MOS,
author = "Rami R. Razouk and Terri Stewart and Michael Wilson",
title = "Measuring operating system performance on modern
micro-processors",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "193--202",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of micro-processors and commercial operating
systems in real-time applications demands a good
understanding of factors which influence software
performance. Advances in micro-processor design (e.g.
pipelining) make performance prediction based on
instruction cycle counts difficult. In addition, the
increasing complexity of operating systems raises
doubts about our ability to ensure that their
performance will meet system requirements. Performance
measurement is more important than ever. This paper
describes an ongoing project intended to use
performance measurements to characterize the
performance of real-time systems software. To date the
project has conducted extensive experiments on an
in-house operating system running on Intel's 286/10
micro-computer in order to test the feasibility of
accurate and repeatable measurement of O/S performance.
The measurement approach, which views the software from
a resource-consumption standpoint, can be applied to
both O/S and application level software. Some of the
measurement results are presented here and are used to
test the manufacturer's assumptions about the
hardware's performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicola:1986:QAF,
author = "Victor F. Nicola and V. G. Kulkarni and Kishor S.
Trivedi",
title = "Queueing analysis of fault-tolerant computer systems
(extended abstract)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "203--203",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing models provide a useful tool for predicting
the performance of many service systems including
computer systems, telecommunication systems,
computer/communication networks and flexible
manufacturing systems. Traditional queueing models
predict system performance under the assumption that
all service facilities provide failure-free service. It
must, however, be acknowledged that service facilities
do experience failures and that they get repaired. In
recent years, it has been increasingly recognized that
this separation of performance and
reliability/availability models is no longer adequate.
An exact steady-state queueing analysis of such systems
is considered by several authors and is carried out by
means of generating functions, supplementary variables,
embedded Markov process and renewal theory, or
probabilistic techniques [1,2,7,8]. Another approach is
approximate, in which it is assumed that the time to
reach the steady-state is much smaller than the times
to failures/repairs. Therefore, it is reasonable to
associate a performance measure (reward) with each
state of the underlying Markov (or semi-Markov) model
describing the failure/repair behavior of the system.
Each of these performance measures is obtained from the
steady-state queueing analysis of the system in the
corresponding state [3,5]. Earlier we have developed
models to derive the distribution of job completion
time in a failure-prone environment [3,4]. In these
models, we need to consider a possible loss of work due
to the occurrence of a failure, i.e., the interrupted
job may be resumed or restarted upon service
resumption. Note that the job completion time analysis
includes the delays due to failures and repairs. The
purpose of this paper [9] is to extend our earlier
analysis so as to account for the queueing delays. In
effect, we consider an exact queueing analysis of
fault-tolerant systems in order to obtain the
steady-state distribution and the mean of the number of
jobs in the system. In particular, we study a system in
which jobs arrive in a Poisson fashion and are serviced
according to FCFS discipline. The service requirements
of the incoming jobs form a sequence of independent and
identically distributed random variables. The
failure/repair behaviour of the system is modelled by
an irreducible continuous-time Markov chain, which is
independent of the number of jobs in the system. Let
the state-space be $ \{ 1, 2, \ldots {}, n \} $. When
the computer system is in state $i$ it delivers service
at rate $ r_i \geq 0$. Furthermore, depending on the
type of the state, the work done on the job is
preserved or lost upon entering that state. The actual
time required to complete a job depends in a complex
way upon the service requirement of the job and the
evolution of the state of the system. Note that even
though the service requirements of jobs are independent
and identically distributed, the actual times required
to complete these jobs are neither independent nor
identically distributed, and hence the model cannot be
reduced to a standard M/G/1 queue [8]. As loss of work
due to failures and interruptions is quite a common
phenomenon in fault-tolerant computer systems, the
model proposed here is of obvious interest. Using our
earlier results on the distribution of job completion
time we set up a queueing model and show that it has
the block M/G/1 structure. Queueing models with such a
structure have been studied by Neuts, Lucantoni and
others [6]. We demonstrate the usefulness of our
approach by performing the numerical analysis for a
system with two processors subject to failures and
repairs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:1986:ACQ,
author = "E. G. {Coffman, Jr.} and E. Gelenbe and E. N.
Gilbert",
title = "Analysis of a conveyor queue in a flexible
manufacturing system",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "204--223",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a flexible manufacturing system stations are
arranged along a common conveyor that brings items for
processing to the stations and also carries away the
processed items. At each station specialized robots
automatically load and unload items on and off the
conveyor. We examine here a single station in such a
system. A new kind of queueing problem arises, with
input-output dependencies that result because the same
conveyor transports items both to and from the station.
The paper analyzes two models of a station. Model 1 has
one robot that cannot return a processed item to the
conveyor while unloading a new item for processing.
Model 2 has two robots to allow simultaneous loading
and unloading of the conveyor. A principal goal of the
analysis is the proper choice of the distance
separating the two points at which items leave and
rejoin the conveyor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kouvatsos:1986:MEQ,
author = "Demetres D. Kouvatsos",
title = "A maximum entropy queue length distribution for the
{G/G/1} finite capacity queue",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "224--236",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317555",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new ``hybrid'' analytic framework, based on the
principle of maximum entropy, is used to approximate
the queue length distribution of a G/G/1 finite buffer
queue. Robust recursive relations are derived and
asymptotic connections to the infinite capacity queue
are established. Furthermore, ``equivalence''
principles are applied to analyse two-stage cyclic
queues with general service times and favourable
comparisons with global balance solutions are made.
Numerical examples provide useful information on how
critically system behaviour is affected by the
distributional form of interarrival and service
patterns. It is shown that the maximum entropy solution
predicts the bottleneck ``anomaly'' and also it defines
bounds on system performance. Comments on the
implications of the work for the analysis and aggregation
of computer systems are included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Takagi:1986:QAN,
author = "Hideaki Takagi and Masayuki Murata",
title = "Queueing analysis of nonpreemptive reservation
priority discipline",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "237--244",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Analysis is given to a nonpreemptive priority queueing
system with $P$ classes of messages where the class of
message to be served next is the highest priority class
waiting at the time of service start. (If this were the
highest priority class waiting at the service
completion epoch, we would have a classical
nonpreemptive head-of-line priority queueing system.)
We assume that the message service time distribution is
identical for all classes. The mean message waiting
time is obtained explicitly for each class, and
numerically compared to the values in the corresponding
head-of-line system. We have also proposed and
evaluated a fairness measure to demonstrate the degree
of discrimination. This model can be applied to the
performance analysis of the prioritized token-ring
scheme in local area computer networks when the
propagation delay and bit latency are negligible
compared to the frame transmission time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
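The paper compares its reservation discipline against the classical nonpreemptive
head-of-line (HOL) priority queue. That baseline is easy to compute from Cobham's
formula; the sketch below does so for an M/G/1 setting with the first class as the
highest priority (the arrival rates and the identical exponential service times in
the example are hypothetical).

    def hol_nonpreemptive_waits(lams, es, es2):
        # Cobham's formula for mean waits under nonpreemptive HOL priority.
        # lams: arrival rates; es: mean service times; es2: second moments.
        w0 = 0.5 * sum(l * s2 for l, s2 in zip(lams, es2))   # mean residual service
        waits, sigma_prev = [], 0.0
        for l, s in zip(lams, es):
            sigma = sigma_prev + l * s
            waits.append(w0 / ((1.0 - sigma_prev) * (1.0 - sigma)))
            sigma_prev = sigma
        return waits

    # two classes with identical exponential service (mean 1, second moment 2)
    print(hol_nonpreemptive_waits([0.3, 0.4], [1.0, 1.0], [2.0, 2.0]))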
@Article{Hofri:1986:QSP,
author = "Micha Hofri",
title = "Queueing systems with a procrastinating server",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "245--253",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two related problems are analyzed and discussed: A
queueing system that differs from the standard M/G/1
only in that at the end of a busy-period the server
takes a sequence of vacations, inspecting the state of
the queue at the end of each. When the length of the
queue exceeds a predetermined level $m$ it returns to
serve the queue exhaustively. Two queues, with Poisson
arrivals and general service-time distributions are
attended by a single server. When the server is
positioned at a certain queue it will serve the latter
exhaustively, and at busy-period end will only switch
to the other if the queue length there exceeds in size
a predetermined threshold $ m_i $. The treatment combines
analytic and numerical methods. Only steady-state
results are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boxma:1986:WTA,
author = "O. J. Boxma and B. Meister",
title = "Waiting-time approximations for cyclic-service systems
with switch-over times",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "254--262",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mean waiting-time approximations are derived for a
single-server multi-queue system with nonexhaustive
cyclic service. Non-zero switch-over times of the
server between consecutive queues are assumed. The main
tool used in the derivation is a pseudo-conservation
law recently found by Watson. The approximation is
simpler and, as extensive simulations show, more
accurate than existing approximations. Moreover, it
gives very good insight into the qualitative behavior
of cyclic-service queueing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hu:1986:MFA,
author = "Irene Hu",
title = "Measuring file access patterns in {UNIX}",
journal = j-SIGMETRICS,
volume = "14",
number = "2",
pages = "15--20",
month = aug,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/15827.15828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:16 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "UNIX is a disk-based operating system, where only the
system kernel is always memory-resident. A combination
of small block size, limited read-ahead and numerous
seeks can severely limit the file system throughput.
This paper presents a tool to study the file access
patterns. Information derived from the data collected
can be used to determine the optimal disk block size
and also to improve the block placement strategy. The
tool is a software monitor, installed at the device
driver level, and triggered by every physical request
to the disk handler. The design approach used to
measure the average number of logical records accessed
sequentially is described. An evaluation of the tool is
also presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
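The statistic the monitor derives --- the average number of logical records accessed
sequentially --- can be approximated from a block-level trace by measuring runs of
consecutive block numbers. The fragment below is a toy illustration of that idea on
a made-up trace, not the device-driver instrumentation described in the paper.

    def mean_sequential_run(block_trace):
        # Average length of runs of consecutively increasing block numbers.
        runs, length = [], 1
        for prev, cur in zip(block_trace, block_trace[1:]):
            if cur == prev + 1:
                length += 1
            else:
                runs.append(length)
                length = 1
        runs.append(length)
        return sum(runs) / len(runs)

    print(mean_sequential_run([10, 11, 12, 40, 41, 7, 8, 9, 10]))   # -> 3.0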
@Article{Ferrari:1986:CIP,
author = "Domenico Ferrari",
title = "Considerations on the insularity of performance
evaluation",
journal = j-SIGMETRICS,
volume = "14",
number = "2",
pages = "21--32",
month = aug,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/15827.15829",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:16 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The author argues that systems performance evaluation,
in the first twenty years of its existence, has
developed in substantial isolation with respect to such
disciplines as computer architecture, system
organization, operating systems, and software
engineering. The possible causes for this phenomenon,
which seems to be unique in the history of engineering,
are explored. Its positive and negative effects on
computer science and technology, as well as on
performance evaluation itself, are discussed. In the
author's opinion, the drawbacks of isolated development
outweigh its advantages. Thus, the author proposes
instructional and research initiatives to foster the
rapid integration of the performance evaluation
viewpoint into the main stream of computer science and
engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tripathi:1987:RWD,
author = "Satish K. Tripathi and Steve Kaisler and Sharat
Chandran and Ashok K. Agrawala",
title = "Report on the {Workshop on Design \& Performance
Issues in Parallel Architectures}",
journal = j-SIGMETRICS,
volume = "14",
number = "3--4",
pages = "16--32",
month = jan,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/25286.25287",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:20 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Machines that perform computations in parallel have
come into vogue today partly prodded by technology and
user needs. In the early spring of `86, a workshop was
held under the auspices of the University of Maryland
Institute for Advanced Computer Studies (UMIACS) to
investigate the design and the not-usually-addressed
issue of the performance of these machines. This report
serves as a record of the workshop though it does not
promise to be a transcript of the various sessions.
About a dozen presentations interspersed with spirited
open-forum discussions have been paraphrased here. It
is hoped that this report remains faithful to the
proceedings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gray:1987:VDS,
author = "Jim Gray",
title = "A view of database system performance measures",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "3--4",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29905",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Database systems allow quick creation of performance
problems. The goal of database systems is to allow the
computer-illiterate to write complex and complete
applications. It is the job of the system to translate
a high-level description of data and procedures into
efficient algorithms. The REAL performance metric of a
system is how successfully it meets these goals.
Practitioners use a much narrower definition of system
performance. They assume a standard workload and
measure performance by peak throughput and by dollar
cost per transaction. Although many vendors have
``private'' performance measures, Bitton, Dewitt, and
Turbyfill were the first to publish a measure of
database system performance [Bitton]. Their measure,
here called the Wisconsin benchmark, consists of a
database design, a set of 32 retrieval and update
statements, and a script for multi-user tests. They
give two performance metrics: the elapsed time for each
statement and the throughput of the system when running
sixteen simultaneous scripts. No response time
requirement or cost measure is included in the
definition. The Wisconsin benchmark is the most widely
used database benchmark. Largely in response to the
Wisconsin benchmark, an informal group including Bitton
and Dewitt, defined a benchmark more representative of
transaction processing applications [Anon]. Its
workload is: SCAN --- A mini-batch operation to
sequentially copy 1000 records. SORT --- A batch
operation to sort one million records. DebitCredit ---
A short transaction with terminal input and output via
X.25, presentation services, and a mix of five database
accesses. The DebitCredit transaction has rules for
scaling the terminal network and database size as the
transaction rate increases, and also rules for
distributing transactions if the system is
decentralized. The performance metrics for this
benchmark are: Elapsed time for the SCAN and SORT. Peak
throughput for the DebitCredit transaction at 1 second
response time for 95\% of the transactions. This gives
a TPS (Transactions Per Second) rating. Price per
transaction where price is the 5-year cost of hardware,
software and maintenance. This is sometimes called the
vendors-view of price. This benchmark has been adopted
by several vendors to compare their performance and
price performance from release to release and also to
compare their performance to competitive products.
MIPS, Whetstones and MegaFLOPs have served a similar
role in the scientific community. A system's TPS rating
indicates not just processor speed, but also IO
architecture, operating system, data communications and
database software performance. Unfortunately, it does
not capture ease-of-use. Work continues on formalizing
these benchmarks. At present they are written in
English. Ultimately they should be defined by a file
generator and a set of programs written in a standard
database language such as COBOL-SQL. When a vendor
first measures his system against these benchmarks, the
results are usually terrible. Both benchmarks are
designed to expose generic performance bugs in
frequently used transaction processing atoms. For
example, the Wisconsin and SCAN benchmarks heavily
penalize a system which is slow to read the next record
in a file. A system with poor performance on these
benchmarks can be analyzed as follows: Most vendors
have an ``atomic'' model of their system which
represents each transaction as a collection of atoms.
The atoms are the primitives of the system. For
example, the SCAN benchmark is represented by most
vendors as: SCAN: BEGIN TRANSACTION; PERFORM 1000 TIMES:
READ SEQUENTIAL, INSERT SEQUENTIAL; COMMIT TRANSACTION.
The atomic weights for BEGIN, READ SEQUENTIAL, INSERT
SEQUENTIAL, and COMMIT are measured for each release.
The atomic weight usually consists of CPU instructions,
message bytes, and disc IOs for a ``typical'' call to
that operation. These weights can be converted to
service times by knowing the speeds and utilizations of
the devices (processors, discs, lines) used for the
application. The molecular weight and service time of
SCAN can then be computed as the sum of the atomic
weights. Defining and measuring a system's atoms is
valuable. It produces a simple conceptual model of how
the system is used. Atomic measurements also expose
performance bugs. For example, based on the SCAN
benchmark, most systems perform READ SEQUENTIAL in 1000
instructions and with 0.02 disc IO. If a system uses
many more instructions or many more IO then it has a
performance problem. Similarly, the DebitCredit
transaction typically consumes about 200Ki (thousand
instructions) and five disc IO per transaction. One
system is known to use 800Ki and 14 IO per transaction.
The vendor could use atomic measurement to find the
causes of such poor performance. When such problems are
localized to an atom, solutions to the problem readily
suggest themselves. So, atomic measurement is useful
for performance assurance and performance improvement.
Atomic measurement also has a major role in system
sizing and in capacity planning. If the customer can
describe his application in terms of atoms, then a
spreadsheet application can give him an estimate of the
CPU, disc and line cost for the application. With
substantially more effort (and assumptions) the
system's response time can be predicted. With even more
effort, a prototype system can be generated and
benchmarked from the atomic transaction descriptions.
Snapshot [Stewart] and Envision [Envison] are examples
of systems which combine atomic modeling, queue
modeling, and ultimately benchmarking of real systems
generated from the atomic description of the
application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
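The ``atomic'' costing described in the abstract amounts to summing per-primitive
weights over a transaction profile. The sketch below does exactly that for the SCAN
mini-batch; the READ SEQUENTIAL figures echo the rough numbers quoted in the abstract
(about 1000 instructions and 0.02 disc IOs per call), while the other weights are
purely hypothetical.

    def transaction_cost(atoms, atomic_weights):
        # Sum atomic weights (thousand instructions, disc IOs) over a transaction profile.
        ki = sum(n * atomic_weights[a][0] for a, n in atoms.items())
        io = sum(n * atomic_weights[a][1] for a, n in atoms.items())
        return ki, io

    weights = {                      # (Ki instructions, disc IOs) per call -- illustrative
        "BEGIN": (5, 0.0),
        "READ SEQUENTIAL": (1, 0.02),
        "INSERT SEQUENTIAL": (1, 0.02),
        "COMMIT": (10, 1.0),
    }
    scan = {"BEGIN": 1, "READ SEQUENTIAL": 1000, "INSERT SEQUENTIAL": 1000, "COMMIT": 1}
    ki, io = transaction_cost(scan, weights)
    print("SCAN ~", ki, "Ki instructions, ~", round(io), "disc IOs")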
@Article{Heidelberger:1987:PCM,
author = "Philip Heidelberger and Seetha Lakshmi",
title = "A performance comparison of multi-micro and mainframe
database architectures",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "5--6",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Database machine architectures consisting of multiple
microprocessors or mini-computers are attracting wide
attention. There have been several proposals and
prototypes (see, e.g., DeWitt, Gerber, Graefe, Heytens,
Kumar and Muralikrishna (1986), Fishman, Lai and
Wilkinson (1984), Hsiao (1983), or the 1983 and 1985
Proceedings of the International Workshop on Database
Machines). There is also a commercially available
system based on multiple microprocessors (Teradata
(1984)). With these architectures it is possible to
exploit parallelism at three levels: within a single
query, within a single transaction, and by
simultaneously executing multiple independent
transactions. The rationale behind these multiple
microprocessor architectures is primarily to take
advantage of the potential lower cost per MIPS
(Millions of Instructions per Second, a measure of
processing power) of microprocessors as opposed to
mainframes. In addition, database machines may offer
incremental capacity growth as well as improved
performance for large queries by exploiting parallelism
within a single query. However, it is not clear if
database machines made of multiple microprocessors
indeed have any cost/performance advantage over a more
conventional mainframe-based database management
system. Several papers on the performance analysis of
database machines can be found in the literature (e.g.,
Salza, Terranova and Velardi (1983) or Bit and Hartman
(1985)). Most of these studies have focused on
determining the execution time of a single query in a
particular database machine architecture. Few studies
have dealt with the response time of single queries in
a multi-user environment. We are not aware of any
papers that systematically study the performance
trade-offs between a multi-microprocessor database
machine and a large mainframe system. This paper
presents such a systematic study. We examine a
hypothetical database machine that uses standard
microprocessors and disks; database machines that use
special purpose hardware are not considered here (e.g.,
Sakai, Kamiya, Iwata, Abe, Tanaka, Shibayama and
Murakami (1984)). However, we do not limit our studies
to the components available today; we also consider
processors and disks projected to be available in the
future. We assume that both the database machine and
the mainframe provide relational database functions
(e.g., Date (1986)). While there are several
applications for relational database (on-line
transaction processing, ad-hoc queries, etc.), we limit
our attention to one specific application domain;
namely high volume on-line transaction processing. In
this domain, we consider a range of transactions and
investigate the sensitivity of the two architectures to
various transaction related parameters. Dias, Iyer and
Yu (1986), in a similar study, have investigated the
issue of coupling many small systems to obtain
comparable performance of a few (coupled) large
systems. Their study is limited to a specific workload
with no parametric or sensitivity study with respect to
transaction characteristics and the architectures they
compared are quite different from the database machine
considered in this paper. For high volume transaction
processing environments, there appears to be only a
limited potential to exploit parallelism within a
single transaction. It is therefore expected that since
the database machine is made of slower processors and
since the functions are distributed across several
processors, it would require more aggregate processing
capacity, or MIPS, than the mainframe to sustain a
given throughput and a response time. Thus there is a
trade-off between the cheaper cost per MIPS of
microprocessors as opposed to mainframes and the
increase in aggregate MIPS required by the database
machine to achieve a given performance level. This
paper addresses this trade-off through the use of
queueing network performance models of the two
architectures. Assuming that the MIPS ratings of the
microprocessor and mainframe are equivalent, our models
indicate that with today's processor technology, the
performance of the database machine is sensitive to the
transaction complexity, the amount of skew in the data
access pattern, the amount of overhead required to
implement the distributed database function and the
buffer miss ratio. Furthermore, there is only a narrow
range of transaction processing workloads for which the
database machine can meet a prespecified response time
objective with only a moderate increase in aggregate
processing capacity over that of the mainframe.
However, using the technology projected for the early
1990's, our models predict that the performance of the
hypothetical database machine is less sensitive to the
above factors. Assuming that the level of lock
contention is low, the memory hierarchies of the two
architectures are equivalent (in the sense of achieving
equal buffer miss ratios), and the performance of disks
are equivalent in the two architectures, the models
predict that the performance objective can be met with
only a moderate increase in aggregate capacity for a
broader range of transaction workloads. The workloads
considered in this paper consist of relatively short
transactions based on primary key retrievals and
updates. It is therefore difficult to make general
conclusions about the overall superiority of one
architecture against the other when a mixed set of
workloads is expected (our study assumes that all
transactions have the same expected pathlength and I/O
activity). This study focused on performance issues and
specifically does not address such issues as MIPS
flexibility (general purpose versus special purpose
architectures), security, recovery and system
management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reed:1987:PRA,
author = "Daniel A. Reed and Chong-kwon Kim",
title = "Packet routing algorithms for integrated switching
networks",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "7--15",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Repeated studies have shown that a single switching
technique, either circuit or packet switching, cannot
optimally support a heterogeneous traffic mix composed
of voice, video and data. Integrated networks support
such heterogeneous traffic by combining circuit and
packet switching in a single network. To manage the
statistical variations of network traffic, we introduce
a new, adaptive routing algorithm called hybrid,
weighted routing. Simulations show that hybrid,
weighted routing is preferable to other adaptive
routing techniques for both packet switched networks
and integrated networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gonsalves:1987:PEV,
author = "Timothy A. Gonsalves and Fouad A. Tobagi",
title = "Performance of the {Expressnet} with voice\slash data
traffic",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "16--26",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29908",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the past few years, local area networks have come
into widespread use for the interconnection of
computers. Together with the trend towards digital
transmission in voice telephony, this has spurred
interest in integrated voice/data networks. The
Expressnet, an implicit-token round-robin scheme using
unidirectional busses, achieves high performance even
at bandwidths of 100 Mb/s. Other features that make the
protocol attractive for voice/data traffic are bounded
delays and priorities. The latter is achieved by
devoting alternate rounds to one or the other of the
two traffic types. By the use of accurate simulation,
the performance of the Expressnet with voice/data
traffic is characterized. It is shown that the
Expressnet satisfies the real-time constraints of voice
traffic adequately even at bandwidths of 100 Mb/s. Data
traffic is able to effectively utilize bandwidth unused
by voice traffic. The trade-offs in the alternating
round priority mechanism are quantified. Loss of voice
samples under overload is shown to occur regularly in
small, frequent clips, subjectively preferable to
irregular clips. In a comparison of the Expressnet, the
contention-based Ethernet and the round-robin Token Bus
protocols, the two round-robin protocols are found to
perform better than the Ethernet under heavy load owing
to the more deterministic mode of operation. The
comparison of the two round-robin protocols highlights
the importance of minimizing scheduling overhead at
high bandwidths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agrawal:1987:ARD,
author = "Subhash Agrawal and Ravi Ramaswamy",
title = "Analysis of the resequencing delay for {M/M/m}
systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "27--35",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29909",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many virtual circuit service communications networks
such as SNA employ a virtual circuit transmission method
inside the subnet. An essential feature of such
networks is that the sequence in which messages are
transmitted is maintained throughout the route from
source node to the destination node. When there are
multiple links connecting two intermediate nodes in the
route and the messages are of different lengths, then
it is possible that the messages complete transmission
at the next node out of sequence. These messages then
have to be resequenced, i.e. put in the right order, in
order to provide a virtual circuit service. The
resequencing operation introduces an additional delay
in transmission which may be significant. In this paper
the probability distribution of the resequencing delay
is obtained for the M/M/m system. Simple expressions
for the mean and coefficient of variation of the
resequencing delay are also provided. It is shown
through a variety of numerical examples that the
resequencing delay is likely to be a significant
component of the overall response time. Some
interesting aspects of dependence of the mean
resequencing delay on system parameters are studied
analytically.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
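The resequencing delay being analyzed is easy to estimate by simulation, which also
provides a sanity check on closed-form results. The sketch below runs a Monte Carlo
experiment for an M/M/m queue with FCFS service and in-order delivery (the arrival
rate, service rate and number of servers are hypothetical); it is a simulation, not
the paper's analytical derivation.

    import random

    def mean_resequencing_delay(lam=0.8, mu=1.0, m=2, n_msgs=200000, seed=3):
        # Messages arrive (Poisson), are served FCFS by m exponential servers,
        # and must be delivered in arrival order; the extra wait is the
        # resequencing delay.
        random.seed(seed)
        t = 0.0
        free_at = [0.0] * m                  # next time each server is free
        finish = []
        for _ in range(n_msgs):
            t += random.expovariate(lam)     # Poisson arrivals
            k = min(range(m), key=lambda j: free_at[j])
            start = max(t, free_at[k])       # FCFS start of service
            free_at[k] = start + random.expovariate(mu)
            finish.append(free_at[k])
        deliver, total = 0.0, 0.0
        for f in finish:
            deliver = max(deliver, f)        # delivery waits for earlier messages
            total += deliver - f             # pure resequencing delay
        return total / n_msgs

    print("mean resequencing delay ~", mean_resequencing_delay())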
@Article{Reed:1987:PDE,
author = "Daniel A. Reed and Allen D. Malony and Bradley D.
McCredie",
title = "Parallel discrete event simulation: a shared memory
approach",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "36--38",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bucher:1987:CLV,
author = "Ingrid Y. Bucher and Margaret L. Simmons",
title = "A close look at vector performance of
register-to-register vector computers and a new model",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "39--45",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Darema-Rogers:1987:MAP,
author = "F. Darema-Rogers and G. F. Pfister and K. So",
title = "Memory access patterns of parallel scientific
programs",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "46--58",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A parallel simulator, PSIMUL, has been used to collect
information on the memory access patterns and
synchronization overheads of several scientific
applications. The parallel simulation method we use is
very efficient and it allows us to simulate execution
of an entire application program, amounting to hundreds
of millions of instructions. We present our
measurements on the memory access characteristics of
these applications; particularly our observations on
shared and private data, their frequency of access and
locality. We have found that, even though the shared
data comprise the largest portion of the data in the
application program, on the average a small fraction of
the memory references are to shared data. The low
averages do not preclude bursts of traffic to shared
memory, nor do they rule out positive benefits from
caching shared data. We also discuss issues of
synchronization overheads and their effect on
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Geist:1987:DSS,
author = "Robert Geist and Robert Reynolds and Eve Pittard",
title = "Disk scheduling in {System V}",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "59--68",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A variety of disk scheduling algorithms, including
some newly defined ones, are compared both in
simulation and in tests on a real machine running UNIX
System V. In the real system tests, first-come
first-served (FCFS), shortest seek time first (SSTF),
and the standard System V algorithm (SVS) are all seen
to yield relatively poor mean waiting time performance
when compared to the VSCAN(0.2) algorithm and
modifications thereof suggested by Coffman.
Nevertheless, each is seen to excel along a particular
performance dimension. The adequacy of open, Poisson
arrival simulation models in predicting disk scheduling
performance is questioned, and an alternative arrival
model is suggested which offers improved predictions in
the System V environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
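As a very small-scale illustration of why SSTF-like policies beat FCFS on seek
distance (one of the effects underlying the comparison above), the fragment below
serves one static batch of requests under both policies; the cylinder range and
batch size are hypothetical, and the experiment says nothing about the dynamic,
arrival-driven behaviour the paper measures.

    import random

    def total_seek(requests, start, policy):
        # Total seek distance (in cylinders) to serve a fixed batch of requests.
        pending, head, dist = list(requests), start, 0
        while pending:
            if policy == "FCFS":
                nxt = pending.pop(0)
            else:                                         # SSTF: closest cylinder next
                nxt = min(pending, key=lambda c: abs(c - head))
                pending.remove(nxt)
            dist += abs(nxt - head)
            head = nxt
        return dist

    random.seed(7)
    reqs = [random.randrange(1000) for _ in range(50)]
    print("FCFS:", total_seek(reqs, 500, "FCFS"), "SSTF:", total_seek(reqs, 500, "SSTF"))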
@Article{Livny:1987:MDM,
author = "Miron Livny and Setrag Khoshafian and Haran Boral",
title = "Multi-disk management algorithms",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "69--77",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate two schemes for placing data on
multiple disks. We show that declustering (spreading
each file across several disks) is inherently better
than clustering (placing each file on a single disk)
due to a number of reasons including parallelism and
uniform load on all disks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
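One of the parallelism arguments for declustering can be stated as a back-of-envelope
calculation: striping a file over several disks lets its blocks be fetched in
parallel. The fragment below compares single-request read times under that
simplification (the block count, block time and disk count are hypothetical, and
contention, seeks and multi-user load --- which the paper does consider --- are ignored).

    import math

    def read_time(blocks, block_time, disks, declustered):
        # Clustered: whole file on one disk; declustered: striped over all disks.
        if declustered:
            return math.ceil(blocks / disks) * block_time
        return blocks * block_time

    print(read_time(blocks=64, block_time=0.01, disks=8, declustered=False))  # 0.64
    print(read_time(blocks=64, block_time=0.01, disks=8, declustered=True))   # 0.08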
@Article{Buzen:1987:UOT,
author = "Jeffrey P. Buzen and Annie W. Shum",
title = "A unified operational treatment of {RPS} reconnect
delays",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "78--92",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Expressions are presented for RPS reconnect delays in
three basic cases: single path, multiple path with
static reconnect, multiple path with dynamic reconnect.
The assumption of homogeneous reconnects, which is
introduced in the analysis, is shown to be implicit in
many prior analyses. This assumption simplifies the
resulting equations, but more general equations are
also presented for the case where homogeneous
reconnects are not assumed. These general results have
not appeared previously. This paper also uses the
assumption of constrained independence to derive a
result for static reconnect which has only been derived
previously using the maximum entropy principle. In the
case of dynamic reconnect, constrained independence
yields an entirely new closed form result. In addition
to being a consistent extension of the static reconnect
case, this new result is the only closed form
expression for dynamic reconnect that yields a correct
solution in certain saturated cases. Constrained
independence can provide a useful alternative
assumption in many other cases where complete
independence is known to be only approximately
correct.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nelson:1987:PAP,
author = "R. Nelson and D. Towsley and A. N. Tantawi",
title = "Performance analysis of parallel processing systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "93--94",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29904.29916",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A centralized parallel processing system with job
splitting is considered. In such a system, jobs wait in
a central queue, which is accessible by all the
processors, and are split into independent tasks that
can be executed on separate processors. This parallel
processing system is modeled as a bulk arrival $ M^X / M / c $
queueing system where customers and bulks correspond to
tasks and jobs, respectively. Such a system has been
studied in [1, 3] and an expression for the mean
response time of a random customer is obtained.
However, since we are interested in the time that a job
spends in the system, including synchronization delay,
we must evaluate the bulk response time rather than
simply the customer response time. The job response
time is the sum of the job waiting time and the job
service time. By analyzing the bulk queueing system we
obtain an expression for the mean job waiting time. The
mean job service time is given by a set of recurrence
equations. To compare this system with other parallel
processing systems, the following four models are
considered: Distributed/Splitting (D/S), Distributed/No
Splitting (D/NS), Centralized/Splitting (C/S), and
Centralized/No Splitting (C/NS). In each of these
systems there are $c$ processors, jobs are assumed to
consist of set of tasks that are independent and have
exponentially distributed service requirements, and
arrivals of jobs are assumed to come from a Poisson
point source. The systems differ in the way jobs queue
for the processors and in the way jobs are scheduled on
the processors. The queueing of jobs for processors is
distributed if each processor has its own queue, and is
centralized if there is a common queue for all the
processors. The scheduling of jobs on the processors is
no splitting if the entire set of tasks composing that
job are scheduled to run sequentially on the same
processor once the job is scheduled. On the other hand,
the scheduling is splitting if the tasks of a job are
scheduled so that they can be run independently and
potentially in parallel on different processors. In the
splitting case a job is completed only when all of its
tasks have finished execution. In our study we compare
the mean response time of jobs in each of the systems
for differing values of the number of processors,
number of tasks per job, server utilization, and
certain overheads associated with splitting up a job.
The $ M^X / M / c $ system studied in the first part of the
paper corresponds to the C/S system. In this system, as
processors become free they serve the first task in the
queue. D/. systems are studied in [2]. We use the
approximate analysis of the D/S system and the exact
analysis of the D/NS system that are given in that
paper. For systems with 32 processors or less, the
relative error in the approximation for the D/S system
was found to be less than 5 percent. In the D/NS
system, jobs are assigned to processors with equal
probabilities. The approximation we use for the mean
job response time for the C/NS system is found in [4].
Although an extensive error analysis for this system
over all parameter ranges has not been carried out, the
largest relative error for the $ M / E_2 / 10 $ system reported
in [4] is about 0.1 percent. For all values of
utilization, $ \rho $, our results show that the splitting
systems yield lower mean job response time than the no
splitting systems. This follows from the fact that, in
the splitting case, work is distributed over all the
processors. For any $ \rho $, the lowest (highest) mean job
response time is achieved by the C/S system (the D/NS
system). The relative performance of the D/S system and
the C/NS system depends on the value of $ \rho $. For small
$ \rho $, the parallelism achieved by splitting jobs into
parallel tasks in the D/S system reduces its mean job
response time as compared to the C/NS system, where
tasks of the same job are executed sequentially.
However, for high $ \rho $, the C/NS system has lower mean
job response time than the D/S system. This is due to
the long synchronization delay incurred in the D/S
system at high utilizations. The effect of parallelism
on the performance of parallel processing systems is
studied by comparing the performance of the C/NS system
to that of the C/S system. The performance improvement
obtained by splitting jobs into tasks is found to
decrease with increasing utilization. For a fixed
number of processors and fixed $ \rho $, we find that by
increasing the number of tasks per job, i.e. higher
parallelism, the mean job response time of the C/NS
system relative to that of the C/S system increases. By
considering an overhead delay associated with splitting
jobs into independent tasks, we observe that the mean
job response time is a convex function of the number of
tasks, and thus, for a given arrival rate, there exists
a unique optimum number of tasks per job. We also
consider problems associated with partitioning the
processors into two sets, each dedicated to one of two
classes of jobs: edit jobs and batch jobs. Edit jobs
are assumed to consist of simple operations that have
no inherent parallelism and thus consist of only one
task. Batch jobs, on the other hand, are assumed to be
inherently parallel and can be broken up into tasks.
All tasks from either class are assumed to have the
same service requirements. A number of interesting
phenomena are observed. For example, when half the jobs
are edit jobs, the mean job response time for both
classes of jobs increases if one processor is allocated
to edit jobs. Improvement to edit jobs, at a cost of
increasing the mean job response time of batch jobs,
results only when the number of processors allocated to
edit jobs is increased to two. This, and other results,
suggest that it is desirable for parallel processing
systems to have a controllable boundary for processor
partitioning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
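For readers who want a concrete reference point for the centralized
no-splitting baseline discussed in the abstract above, the following
sketch computes the exact mean response time of a plain M/M/c queue via
the Erlang C formula. It is only a hedged illustration: the paper's
analysis concerns batch ($ M^X $) arrivals and task splitting, which
this simple formula does not capture.

from math import factorial

def mmc_mean_response_time(lam, mu, c):
    """Mean response time of an M/M/c queue (Erlang C formula).

    lam: job arrival rate, mu: per-server service rate, c: number of
    servers.  Illustrative only; the paper analyses batch (M^X)
    arrivals and task splitting, which this formula does not model.
    """
    a = lam / mu                     # offered load
    rho = a / c                      # per-server utilization
    assert rho < 1, "unstable queue"
    # Erlang C: probability that an arriving job must wait.
    tail = a**c / (factorial(c) * (1 - rho))
    p_wait = tail / (sum(a**k / factorial(k) for k in range(c)) + tail)
    wq = p_wait / (c * mu - lam)     # mean waiting time in queue
    return wq + 1.0 / mu             # waiting time plus service time

# Example: 16 processors at utilization 0.8 with unit mean service time.
print(mmc_mean_response_time(lam=12.8, mu=1.0, c=16))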
@Article{Tan:1987:RDR,
author = "Xiao-Nan Tan and Kenneth C. Sevcik",
title = "Reduced distance routing in single-stage
shuffle-exchange interconnection networks",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "95--110",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In multiprocessor architectures, it is frequently
necessary to provide parallel communication among a
potentially large number of processors and memories.
Among the many interconnection schemes that have been
proposed and analyzed, shuffle-exchange networks have
received much attention due to their ability to allow a
message to pass from any node to any other node in a
number of steps that grows only logarithmically with
the number of interconnected nodes (in the absence of
contention) while keeping the number of hardware
connections per node independent of the number of
nodes. Straightforward use of shuffle-exchange
networks to interconnect $N$ nodes involves having
every packet pass through $ \log_2 N$ stages en route to
its destination. By exploiting common structure in the
addresses of the source and destination nodes, however,
more sophisticated routing can reduce the average
number of steps per message below $ \log_2 N$. In this
paper, we describe and evaluate three levels of
improvements to basic single-stage shuffle-exchange
routing. Each one yields successively more benefit at
the cost of more complexity. Using simulation, we show
that the use of routing schemes that reduce the average
distance can substantially reduce average message delay
times and increase interconnection network capacity. We
quantify the performance gains only in the case where
messages from one node are destined with uniform
probability over all nodes. However, it is clear that
the advantage of the new schemes we propose would be
still greater if there is some ``locality'' of
communication that can be exploited by having the most
frequent communication occur between pairs of nodes
with shorter distances separating them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
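The route-shortening idea summarized in the abstract above can be made
concrete with a small sketch. Under the usual single-stage
shuffle-exchange conventions (shuffle = cyclic left rotation of the
$n$-bit address, exchange = flip of the low-order bit), a message can
stop after $ n - k $ shuffle steps if the $k$ low-order bits of the
source already match the $k$ high-order bits of the destination. This
is only an illustrative reading of ``exploiting common structure in
the addresses''; the paper's three successively more sophisticated
routing schemes are not reproduced here.

def reduced_route_length(src, dst, n):
    """Shuffle steps needed to route src -> dst among 2**n nodes.

    Illustrative sketch: find the largest k such that the k low-order
    bits of src equal the k high-order bits of dst; the remaining
    n - k destination bits are rotated in (with exchanges as needed),
    so only n - k shuffle steps are required instead of n.
    """
    for k in range(n, -1, -1):
        if (src & ((1 << k) - 1)) == (dst >> (n - k)):
            return n - k
    return n   # never reached; k = 0 always matches

n = 4                                            # 16 nodes
print(reduced_route_length(0b1011, 0b0111, n))   # low bits 011 == high bits 011 -> 1 step
print(reduced_route_length(0b0000, 0b1111, n))   # no overlap -> full n = 4 steps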
@Article{Bouras:1987:QDB,
author = "Christos Bouras and John Garofalakis",
title = "Queueing delays in buffered multistage interconnection
networks",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "111--121",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29918",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our work deals with the analysis of the queueing
delays of buffered multistage Banyan networks of
multiprocessors. We provide tight upper bounds on the
mean delays of the second stage and beyond, in the case
of infinite buffers. Our results are validated by
simulations performed on a network simulator
constructed by us. The analytic work for network stages
beyond the first, provides a partial answer to open
problems posed by previous research.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Garcia-Molina:1987:PTM,
author = "Hector Garcia-Molina and Lawrence R. Rogers",
title = "Performance through memory",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "122--131",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29919",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two of the most important parameters of a computer are
its processor speed and physical memory size. We study
the relationship between these two parameters by
experimentally evaluating the intrinsic memory and
processor requirements of various applications. We also
explore how hardware prices are changing the cost
effectiveness of these two resources. Our results
indicate that several important applications are
``memory-bound,'' i.e., can benefit more from increased
memory than from a faster processor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jipping:1987:PPC,
author = "Michael J. Jipping and Ray Ford",
title = "Predicting performance of concurrency control
designs",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "132--142",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29920",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance is a high-priority consideration when
designing concurrent or distributed systems. The
process of designing such a system is complicated by
two factors: (1) the current state-of-the-art in
concurrent system design is very ad hoc --- software
design principles for concurrent systems are still in
their infancy, and (2) performance evaluation of
concurrent systems is quite difficult and it is
especially difficult to relate aspects of the design to
aspects of the implementation. This paper reports on
work with a performance modeling technique for
concurrent or distributed systems that allows
structured design to be related to the implementation
of the concurrency control component of the system.
First, a General Process Model (GPM) is used to
organize system design information into a six level
hierarchy. The abstract performance properties of each
level in the hierarchy have been established using
concurrency control theory. Next, we describe how to
translate the structured system design into efficient
concurrency control techniques, using elements of this
theory. Finally, a prototype automated design
evaluation tool which serves as a central component of
the design methodology is described.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dahbura:1987:PAF,
author = "Anton T. Dahbura and Krishan K. Sabnani and William J.
Hery",
title = "Performance analysis of a fault detection scheme in
multiprocessor systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "143--154",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29921",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A technique is described for detecting and diagnosing
faults at the processor level in a multiprocessor
system. In this method, a process is assigned whenever
possible to two processors: the processor that it would
normally be assigned to (primary) and an additional
processor which would otherwise be idle (secondary).
Two strategies will be described and analyzed: one
which is preemptive and another which is
non-preemptive. It is shown that for moderately loaded
systems, a sufficient percentage of processes can be
performed redundantly using the system's spare capacity
to provide a basis for fault detection and diagnosis
with virtually no degradation of response time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salsburg:1987:SAC,
author = "Michael A. Salsburg",
title = "A statistical approach to computer performance
modeling",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "155--162",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29922",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Models of discrete systems are often utilized to
assist in computer engineering and procurement. The
tools for modeling have been traditionally developed
using either analytic methods or discrete event
simulation. The research presented here explores the
use of statistical techniques to augment and assist
this basic set of tools.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kerola:1987:MPM,
author = "Teemu Kerola and Herb Schwetman",
title = "{Monit}: a performance monitoring tool for parallel
and pseudo-parallel programs",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "163--174",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29923",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a performance monitoring system,
Monit, developed for performance evaluation of parallel
systems. Monit uses trace files that are generated
during the execution of parallel programs. Monit
analyzes these trace files and produces time-oriented
graphs of resource usage and system queues. Users
interactively select the displayed items, resolution,
and time intervals of interest. The current
implementation of Monit is for the SUN-3 workstation, but
the program is easily adaptable to other devices. We
also introduce a parallel programming environment, PPL,
implemented as a superset of C for the Sequent
Balance 8000 multi-processor system. Parallel programs
written in PPL can produce the trace files for Monit.
Monit is also integrated into a process-oriented
simulation language CSIM. CSIM allows the creation of
simulation models based on multiple processes competing
for resources. The similarity between parallel
processes in PPL and pseudo-parallel processes in CSIM
facilitates this combined use of Monit.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marsan:1987:MSA,
author = "M. Ajmone Marsan and G. Balbo and G. Chiola and G.
Conte",
title = "Modeling the software architecture of a prototype
parallel machine",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "175--185",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29904.29924",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A high-level Petri net model of the software
architecture of an experimental MIMD multiprocessor
system for Artificial Intelligence applications is
derived by direct translation of the code corresponding
to the assumed workload. Hardware architectural
constraints are then easily added, and formal reduction
rules are used to simplify the model, which is then
further approximated to obtain a performance model of
the system based on generalized stochastic Petri nets.
From the latter model it is possible to estimate the
optimal multiprogramming level of each processor so as
to achieve the maximum performance in terms of overall
throughput (number of tasks completed per unit time).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alexander:1987:WCP,
author = "William Alexander and Tom W. Keller and Ellen E.
Boughter",
title = "A workload characterization pipeline for models of
parallel systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "186--194",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29925",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The same application implemented on different systems
will necessarily present different workloads to the
systems. Characterizations of workloads intended to
represent the same application, but input to models of
different systems, must also differ in analogous ways.
We present a hierarchical method for characterizing a
workload at increasing levels of detail such that every
characterization at a lower level still accurately
represents the workload at higher levels. We discuss
our experience in using the method to feed the same
application through a workload characterization
``pipeline'' to two different models of two different
systems, a conventional relational database system and
a logic-based distributed database system. We have
developed programs that partially automate the
characterization changes that are required when the
system to be modeled changes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Graf:1987:TBD,
author = "Ingrid M. Graf",
title = "Transformation between different levels of workload
characterization for capacity planning: fundamentals
and case study",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "195--204",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29926",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network models are effective tools for
capacity planning of computer systems. The basis of all
performance-oriented questions is the characterization
of the computer system workload. At the capacity
planning level the workload is described in
user-oriented terms. At the system level the queueing
network model requires input parameters, which differ
from the workload description at the capacity planning
level. In this paper a general procedure to transform
the parameters between these two levels is presented
and applied to a case study. The effect on system
performance of an increase in the use of an existing
application system is analysed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ruan:1987:PAF,
author = "Zuwang Ruan and Walter F. Tichy",
title = "Performance analysis of file replication schemes in
distributed systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "205--215",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29927",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In distributed systems the efficiency of the network
file system is a key performance issue. Replication of
files and directories can enhance file system
efficiency, but the choice of replication techniques is
crucial. This paper studies a number of replication
techniques, including remote access, prereplication,
weighted voting, and two demand replication schemes:
polling and staling. It develops a Markov chain model,
which is capable of characterizing properties of file
access sequences, including access locality and access
bias. The paper compares the replication techniques
under three different network file system
architectures. The results show that, under reasonable
assumptions, demand replication requires fewer file
transfers than remote access, especially for files that
have a high degree of access locality. Among the demand
replication schemes, staling requires fewer auxiliary
messages than polling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
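The notions of access locality and access bias used in the abstract
above can be illustrated with a toy trace generator (a hypothetical
stand-in, not the Markov chain model developed in the paper): with
some probability the next access repeats the current file (locality);
otherwise the file is drawn from a skewed popularity distribution
(bias). Demand-replication schemes such as polling and staling benefit
when the repeat probability is high, because successive accesses hit
an already replicated copy.

import random

def access_trace(n_files, length, locality=0.7, seed=1):
    """Generate a file-access trace with locality and popularity bias.

    Hypothetical illustration only: with probability `locality` the
    previous file is re-accessed; otherwise a file is drawn with
    probability proportional to 1/rank (a Zipf-like bias).
    """
    rng = random.Random(seed)
    weights = [1.0 / (rank + 1) for rank in range(n_files)]
    trace = [rng.choices(range(n_files), weights)[0]]
    for _ in range(length - 1):
        if rng.random() < locality:
            trace.append(trace[-1])          # locality: repeat last file
        else:
            trace.append(rng.choices(range(n_files), weights)[0])
    return trace

trace = access_trace(n_files=100, length=10_000)
repeats = sum(a == b for a, b in zip(trace, trace[1:])) / (len(trace) - 1)
print(round(repeats, 3))    # close to the locality parameter (plus chance repeats)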
@Article{Cheriton:1987:NMV,
author = "David R. Cheriton and Carey L. Williamson",
title = "Network measurement of the {VMTP} request-response
protocol in the {V} distributed system",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "216--225",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29928",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Communication systems are undergoing a change in use
from stream to request-response or transaction
communication. In addition, communication systems are
becoming increasingly based on high-speed, low delay,
low error rate channels. These changes call for a new
generation of networks, network interfaces, and
transport protocol design. The performance
characteristics of request-response protocols on these
high-performance networks should guide the design of
this new generation, yet relatively little data of this
nature is available. In this paper, we present some
preliminary measurements of network traffic for a
cluster of workstations connected by Ethernet running
the V distributed operating system. We claim that this
system, with its use of a high-speed local area network
and a request-response transport protocol tuned for
RPC, provides some indication of the performance
characteristics for systems in the next generation of
communication systems. In particular, these
measurements provide an indication of network traffic
patterns, usage characteristics for request-response
protocols, and the behavior of the request-response
protocol itself. These measurements suggest in general
that a key design focus must be on minimizing network
latency and that a request-response protocol is
well-suited for this goal. This focus has implications
for protocol design and implementation as well as for
the design of networks and network interfaces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salehmohamed:1987:PEL,
author = "Mohamed Salehmohamed and W. S. Luk and Joseph G.
Peters",
title = "Performance evaluation of {LAN} sorting algorithms",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "226--233",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29904.29929",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We adapt several parallel sorting algorithms (block
sorting algorithms) and distributed sorting algorithms
for implementation on an Ethernet network with diskless
Sun workstations. We argue that the performance of
sorting algorithms on local area networks (LANs) should
be analyzed in a manner that is different from the ways
that parallel and distributed sorting algorithms are
usually analyzed. Consequently, we propose an empirical
approach which will provide more insight into the
performance of the algorithms. We obtain data on
communication time, local processing time, and response
time (i.e. total running time) of each algorithm for
various file sizes and different numbers of processors.
Comparing the performance data with our theoretical
analysis, we attempt to provide rationale for the
behaviour of the algorithms and project the future
behaviour of the algorithms as file size, number of
processors, or interprocessor communication facilities
change.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Polyzos:1987:DAW,
author = "George C. Polyzos and Mart L. Molle",
title = "Delay analysis of a window tree conflict resolution
algorithm in a local area network environment",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "234--244",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29930",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Expressions are found for the throughput and delay
performance of a Tree Conflict Resolution Algorithm
that is used in a Local Area Network with carrier
sensing (and possibly also collision detection). We
assume that Massey's constant size window algorithm is
used to control access to the channel, and that the
resulting conflicts (if any) are resolved using a
Capetanakis-like preorder traversal tree algorithm with
d-ary splitting. We develop and solve functional
equations for various performance metrics of the system
and apply the ``Moving Server'' technique to calculate
the main component of the delay. Our results compare
very favorably with those for CSMA protocols, which are
commonly used in Local Area Networks that support
sensing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shenker:1987:SCB,
author = "Scott Shenker",
title = "Some conjectures on the behavior of
acknowledgement-based transmission control of random
access communication channels",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "245--255",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29904.29931",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A class of acknowledgment-based transmission control
algorithms is considered. In the finite population
case, we claim that algorithms based on backoff
functions which increase faster than linearly but
slower than exponentially are stable up to full channel
capacity, whereas sublinear, exponential, and
superexponential algorithms are not. In addition,
comments are made about the nature of the
quasistationary behavior in the infinite population
case, and about how systems interpolate between the
finite and infinite number of station cases. The
treatment presented here is nonrigorous, consisting of
approximate analytic arguments confirmed by detailed
numerical simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mathys:1987:ECE,
author = "Peter Mathys and Boi V. Faltings",
title = "The effect of channel-exit protocols on the
performance of finite population random-access
systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "256--267",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29932",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Random-access systems (RAS) for collision-type
channels have been studied extensively under the
assumption of an infinite population which generates a
Poisson arrival process. If the population is finite
and if the (practically desirable) free-access
channel-access protocol is used, then it is shown that
the specification of a channel-exit protocol is crucial
for the stability and the fairness of the RAS.
Free-exit and blocked-exit protocols are analyzed and
it is concluded that the p-persistent blocked-exit
protocol provides the mechanisms to assure stability
and fairness for a wide range of arrival process
models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fisher:1987:IIA,
author = "Robert Fisher",
title = "The impact of interactive application development with
{CODESTAR}",
journal = j-SIGMETRICS,
volume = "15",
number = "2",
pages = "13--15",
month = aug,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/32100.32101",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many companies are currently plagued with the problem
of not being able to deliver information systems
quickly enough to meet business opportunities.
Management is generally dissatisfied with the
development cycle time, and backlogs are often two
years or more. Texas Instruments has a strategic
program to solve this problem by developing an
integrated set of tools to automate the systems life
cycle of analysis, design, construction and
maintenance, and to reduce associated costs. CODESTAR,
the first major tool to be completed (currently for use
only at TI), addresses both construction and
maintenance. It supports applications ranging from
simple to complex and can be used for the development
of IMS, batch and TSO applications. For example, the
current CODESTAR was developed using the previous
CODESTAR. A pilot project assessed the impact of
CODESTAR. The project's scope included the
construction, checkout and installation of a 20-screen
IMS transaction system involving 6,000 lines of code.
The project had originally been designed, scheduled and
budgeted for a non-CODESTAR methodology. Results were
impressive. Both elapsed time and manpower were reduced
by 50 percent, while computer costs decreased
slightly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Korner:1988:EED,
author = "Ulf K{\"o}rner and Serge Fdida and Harry Perros and
Gerald Shapiro",
title = "End to end delays in a catenet environment",
journal = j-SIGMETRICS,
volume = "15",
number = "3--4",
pages = "20--28",
month = feb,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041849.1041850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a hierarchical model for a catenet
environment. The model consists of three levels of
models, and it reflects the end to end delay between
two host computers each connected to a different LAN.
The two LANs are connected via gateways by a WAN. The
model incorporates a basic flow control mechanism,
standardized local area network behaviour, as well as
gateway functions in terms of packet fragmentation and
reassembly. The model can be used to obtain performance
measures such as the mean end to end delay and the
system's throughput as a function of parameters such as
arrival rate of packets, maximum window size, and
traffic mix.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharma:1988:TSA,
author = "Ravi S. Sharma",
title = "Three simple algorithms for the {N/1/F Problem}",
journal = j-SIGMETRICS,
volume = "15",
number = "3--4",
pages = "29--32",
month = feb,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041849.1041851",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, various techniques such as Divide and
Conquer, Greedy, and Dynamic Programming are used to
solve the N/1/F problem [4]. The algorithms are
presented and proven theoretically. They are also
tested with an example. Complexity analysis is then
performed. These algorithms are different from the
previous ones that solve the same problem in that they
use the basic techniques of Operations Research in
isolation. This simplicity is an attractive feature not
only for purposes of implementation but also in
understanding the problem and its solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analysis of algorithms; computational complexity;
operations modeling; scheduling; software design",
}
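Reading ``N/1/F'' in the standard scheduling notation ($N$ jobs, one
machine, flow-time criterion), the classical benchmark solution is the
greedy shortest-processing-time (SPT) rule, which minimizes total
(hence mean) flow time. The paper's three algorithms are not
reproduced here; the sketch below merely checks the SPT rule against
brute-force enumeration on a tiny instance.

from itertools import permutations

def total_flow_time(order):
    """Sum of job completion times when jobs run in the given order."""
    t = total = 0
    for p in order:
        t += p          # completion time of this job
        total += t
    return total

def spt_order(proc_times):
    """Greedy SPT rule: run jobs in nondecreasing processing time."""
    return sorted(proc_times)

jobs = [7, 2, 9, 4, 1]
best = min(total_flow_time(perm) for perm in permutations(jobs))
print(total_flow_time(spt_order(jobs)), best)   # both print 48: SPT is optimal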
@Article{Covington:1988:RPP,
author = "R. C. Covington and S. Madala and V. Mehta and J. R.
Jump and J. B. Sinclair",
title = "The {Rice Parallel Processing Testbed}",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "4--11",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55596",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the most important trends in high performance
computing is the development and general availability
of parallel processing systems. The designers and users
of such systems have the difficult task of utilizing
the available parallelism in both hardware and
algorithms effectively to realize as much performance
improvement as possible over sequential systems. This
requires matching the structure of parallel programs
with the structure of the concurrent system on which
they are to execute. This in turn makes it necessary to
develop performance evaluation techniques that are more
sophisticated and cost-effective than those currently
used. The Rice Parallel Processing Testbed (RPPT), the
subject of this paper, is a major step in this
direction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lubachevsky:1988:EDE,
author = "B. D. Lubachevsky",
title = "Efficient distributed event driven simulations of
multiple-loop networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "12--24",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55597",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Simulating asynchronous multiple-loop networks is
commonly considered a difficult task for parallel
programming. This paper presents two examples of
asynchronous multiple-loop networks: a stylized queuing
system and an Ising model. The network topology in both
cases is an $ n \times n $ grid on a torus. A new
distributed simulation algorithm is demonstrated on
these two examples. The algorithm combines three
elements: (1) the bounded lag restriction, (2)
precomputed minimal propagation delays, and (3) the
so-called opaque periods. Theoretical performance
evaluation suggests that if $N$ processing elements
(PEs) execute the algorithm in parallel and the
simulated system exhibits sufficient density of events,
then, on average, processing one event would require $
\Omega (\log N)$ instructions of one PE. In practice,
the algorithm has achieved substantial speed-ups: the
speed-up is greater than 16 using 25 PEs on a shared
memory MIMD bus computer, and greater than 1900 using
$ 2^{14}$ PEs on a SIMD computer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lucier:1988:PEM,
author = "B. J. Lucier",
title = "Performance evaluation for multiprocessors programmed
using monitors",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "22--29",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55598",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a classification of synchronization delays
inherent in multiprocessor systems programmed using the
monitor paradigm. This characterization is useful in
relating performance of such systems to algorithmic
parameters in subproblems such as domain decomposition.
We apply this approach to a parallel, adaptive grid
code for solving the equations of one-dimensional gas
dynamics implemented on shared memory multiprocessors
such as the Encore Multimax.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ganz:1988:QAF,
author = "A. Ganz and I. Chlamtac",
title = "Queueing analysis of finite buffer token networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "30--36",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55599",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper introduces analytic models for evaluating
demand assignment protocols in realistic finite
buffer/finite station network configurations. We
present a solution for implicit and explicit token
passing systems enabling us to model local area
networks, such as Token Bus. We provide, for the first
time, a tractable approximate solution by using an
approach based on restricted occupancy urn models. The
presented approximation involves the solving of linear
equations whose number is linear and equal only to the
number of buffers in the system. It is demonstrated
that in addition to its simplicity, the presented
approximation is also highly accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zafirovic-Vukotic:1988:PMH,
author = "M. Zafirovic-Vukotic and I. G. M. M. Niemegeers",
title = "Performance modelling of a {HSLAN} slotted ring
protocol",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "37--46",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55600",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The slotted ring protocol which is evaluated in this
paper is suitable for use at very large transmission
rates. In terms of modelling it is a multiple cyclic
server system. A few approximate analytical models of
this protocol are presented and evaluated against
simulation in this paper. The cyclic server model is shown
to be the most accurate and usable over a wide range of
parameters. A performance analysis based on this model
is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiu:1988:CSD,
author = "D.-M. Chiu and R. Sudama",
title = "A case study of {DECnet} applications and protocol
performance",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "47--55",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55602",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is a study based on measurements of network
activities of a major site of Digital's world-wide
corporate network. The study yields two kinds of
results: (1) DECnet protocol performance information
and (2) DECnet session statistics. Protocol performance
is measured in terms of the various network overhead
(non-data) packets in routing, transport and session
layers. From these protocol performance data, we are
able to review how effective various network protocol
optimizations are; for example the on/off flow control
scheme and the delayed acknowledgement scheme in the
transport protocol. DECnet session statistics
characterize the workload in such a large network. The
attributes of a session include the user who started
it, the application invoked, the distance between the
user and the application, the time span, the number of
packets and bytes in each direction, and the various
reasons if a session is not successfully established.
Based on a large sample of such sessions, we generate
distributions based on various attributes of sessions;
for example the application mix, the visit count
distribution and various packet number and size
distributions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shenker:1988:AAL,
author = "S. Shenker and A. Weinrib",
title = "Asymptotic analysis of large heterogeneous queueing
systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "56--62",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55603",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As a simple example of a large heterogeneous queueing
system, we consider a single queue with many servers
with differing service rates. In the limit of
infinitely many servers, we identify a queue control
policy that minimizes the average system delay. When
there are only two possible server speeds, we can
analyze the convergence of this policy to optimality.
Based on this result, we propose policies for large but
finite systems with a general distribution of server
speeds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eager:1988:LPB,
author = "D. L. Eager and E. D. Lazowska and J. Zahorjan",
title = "The limited performance benefits of migrating active
processes for load sharing",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "63--72",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55604",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Load sharing in a distributed system is the process of
transparently sharing workload among the nodes in the
system to achieve improved performance. In
non-migratory load sharing, jobs may not be transferred
once they have commenced execution. In load sharing
with migration, on the other hand, jobs in execution
may be interrupted, moved to other nodes, and then
resumed. In this paper we examine the performance
benefits offered by migratory load sharing beyond those
offered by non-migratory load sharing. We show that
while migratory load sharing can offer modest
performance benefits under some fairly extreme
conditions, there are no conditions under which
migration yields major performance benefits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hong:1988:LGA,
author = "J. Hong and X. Tan and M. Chen",
title = "From local to global: an analysis of nearest neighbor
balancing on hypercube",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "73--82",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55605",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper will focus on the issue of load balancing
on a hypercube network of $N$ processors. We will
investigate a typical nearest neighbor balancing
strategy --- in which workloads among neighboring
processors are averaged at discrete time steps. The
computation model allows tasks, described by
independent random variables, to be generated and
terminated at all times. We assume that the random
variables at all nodes have equal expected value and
their variances are bounded by a constant $ d^2 $, and we
let the difference DIFF between the actual load on each
node and the average load on the system describe the
deviation of the load on a node from the global average
value. The following analytical results are obtained:
The expected value of DIFF, denoted by E(DIFF), is 0.
The variance of DIFF, denoted by Var(DIFF), is
independent of time $t$, and Var(DIFF) $ \leq 1.386 d^2
+ 0.231 \log N$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
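A hedged toy version of the nearest-neighbor balancing step analysed
in the abstract above (the paper's task generation and termination
model, and its exact update rule, are not reproduced): each node of an
$n$-dimensional hypercube repeatedly replaces its load by the average
of its own load and its $n$ neighbors' loads, and the deviation DIFF
of each node from the global average shrinks as the steps proceed.

import numpy as np

def balance_step(loads, dim):
    """One nearest-neighbor averaging step on a hypercube of 2**dim nodes.

    Toy illustration: each node averages its own load with the loads of
    its dim neighbors (the addresses differing from it in one bit).
    """
    n = loads.size
    acc = loads.copy()
    for bit in range(dim):
        acc += loads[np.arange(n) ^ (1 << bit)]   # neighbor across `bit`
    return acc / (dim + 1)

rng = np.random.default_rng(0)
dim = 6                                       # 64 nodes
x = rng.exponential(scale=10.0, size=1 << dim)
for _ in range(5):
    x = balance_step(x, dim)
diff = x - x.mean()                           # DIFF: deviation from the global average
print(round(diff.var(), 4))                   # variance of DIFF falls toward zero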
@Article{Kant:1988:ALM,
author = "K. Kant",
title = "Application level modeling of parallel machines",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "83--93",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55606",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the application level
performance modeling of parallel machines consisting of
a large number of processing elements (PE's) connected
in some regular structure such as mesh, tree,
hypercube, etc. There are $K$ problem types, each
arriving according to a Poisson process, and each of
which needs a PE substructure of some given size and
topology. Thus several problems can run on the machine
simultaneously. It is desired to characterize the
performance of such a system under various types of
allocation schemes. We show that if the queueing is
considered external to our model, it is possible to
construct a Markovian model with local balance
property. The time for which a substructure is held by
a problem could be generally distributed. The model can
be solved efficiently using standard techniques;
however, because of the rather complex structure of the
state space, its direct enumeration is difficult to
avoid. We also show how the size of the state space can
be reduced when the set of allowed substructures is
highly regular. We then show how queueing delays can be
modeled approximately. Finally, we consider the
solution of models involving shared resources such as
global memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Born:1988:ADP,
author = "R. G. Born and J. R. Kenevan",
title = "Analytic derivation of processor potential utilization
in straight line, ring, square mesh, and hypercube
networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "94--103",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55607",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In multicomputer architectures, in which processors
communicate through message-passing, the overhead
encountered because of the need to relay messages can
significantly affect performance. Based upon some
simplifying assumptions, including that the rate at which a
processor generates messages is proportional to its
current potential utilization, processor utilizations
are analytically derived in matrix form for a
bidirectional straight line and square mesh. In
addition, closed form derivations are provided for a
unidirectional ring and an $n$-dimensional hypercube.
Finally, the theoretical results are found to be in
close agreement with discrete-event simulations of the
four architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Majumdar:1988:SMP,
author = "S. Majumdar and D. L. Eager and R. B. Bunt",
title = "Scheduling in multiprogrammed parallel systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "104--113",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55608",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Processor scheduling on multiprocessor systems that
simultaneously run concurrent applications is currently
not well-understood. This paper reports a preliminary
investigation of a number of fundamental issues which
are important in the context of scheduling concurrent
jobs on multiprogrammed parallel systems. The major
motivation for this research is to gain insight into
system behaviour and understand the basic principles
underlying the performance of scheduling strategies in
such parallel systems. Based on abstract models of
systems and scheduling disciplines, several high level
issues that are important in this context have been
analysed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Patel:1988:HSC,
author = "N. M. Patel and P. G. Harrison",
title = "On hot-spot contention in interconnection networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "114--123",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55609",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A major component of a parallel machine is its
interconnection network, which provides concurrent
communication between the processing elements. It is
common to use a multi-stage interconnection network
(MIN) which is constructed using crossbar switches and
introduces not only contention for destination
addresses but also additional contention for internal
switches. Both types of contention are increased when
non-local communication across a MIN becomes
concentrated on a certain destination address, for
example when a frequently-accessed data structure is
stored entirely in one element of a distributed memory.
Such an address, often called a hot-spot, affects the
blocking probability of paths to other destination
addresses because of the shared internal switches. This
paper describes an analytical model of hot-spot
contention and quantifies its effect on the performance
of a MIN with a circuit switching communication
protocol. We obtain performance measures for a MIN in
which partial paths are held during path building and
one destination address is more frequently chosen by
incoming traffic than other addresses.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
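For context, the usual back-of-envelope saturation bound for hot-spot
traffic (in the style of Pfister and Norton, not the circuit-switched
partial-path model analysed in the paper): if each of $N$ processors
issues requests at rate $ \lambda $ and a fraction $h$ of all requests
targets one hot module, with the remainder spread uniformly, then the
hot module must serve $ \lambda (h N + 1 - h)$ requests per unit time,
so with unit-rate memory modules

% Classic hot-spot saturation bound, for context only (assumed model,
% not the paper's).
\[
   \lambda \,(h N + 1 - h) \le 1
   \quad\Longrightarrow\quad
   \lambda \le \frac{1}{1 + h(N-1)} .
\]

For example, $ N = 256$ and $ h = 0.02$ give
$ \lambda \le 1/6.1 \approx 0.16$, i.e. the hot spot caps
per-processor throughput at roughly a sixth of a module's rate even
before internal-switch contention is considered.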
@Article{Kothari:1988:PAM,
author = "S. C. Kothari and A. Jhunjhunwala and A. Mukherjee",
title = "Performance analysis of multipath multistage
interconnection networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "124--132",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55610",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper closely examines the performance analysis
for unbuffered multipath multistage interconnection
networks. A critical discussion of commonly used
analysis is provided to identify a basic flaw in the
model. A new analysis based on the grouping of
alternate links is proposed as an alternative to
rectify the error. The results based on the new
analysis and extensive simulation are presented for
three representative networks. The simulation study
strongly supports the results of the new analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Melus:1988:MPE,
author = "J. L. Mel{\'u}s and E. Sanvicente and J.
Magri{\~n}{\'a}",
title = "Modelling and performance evaluation of multiprocessor
based packet switches",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "133--140",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55611",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an approximate analytic model for
the performance analysis of a class of multiprocessor
based packet switches. For these systems, processors
and common memory modules are grouped in clusters, each
of them composed of several processor-memory pairs that
communicate through a multiple bus interconnection
network. Intercluster communication is also achieved
using one or more busses. The whole network operates in
a circuit-switched mode. After access completion, a
processor remains active for an exponentially
distributed random time. Access times are also
exponential with different means, depending upon the
location (local, cluster, external) of the referenced
module. The arbitration is done on a priority basis.
The performance is predicted by computing the average
number of switched packets per time unit. Other related
indexes are also given. Numerical results are obtained
rather easily by solving a set of two algebraic
equations. Simulation is used to validate the accuracy
of the approximations used in the model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1988:MCP,
author = "T. P. Lee",
title = "A manufacturing capacity planning experiment through
functional workload decomposition",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "141--150",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55612",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe an experiment to evaluate a
distributed architecture via functional database
workload decomposition. A workload in a circuit pack
assembly environment was decomposed and mapped onto a
frontend/backend distributed computer architecture. To
evaluate this distributed architecture, an operational
model for capacity planning was devised, and
performance and cost-effectiveness measures were
chosen. Model parameters were estimated through
benchmark experiments in a distributed system
consisting of various super-microcomputers connected by
a CSMA/CD local area network with INGRES as the
database management system. The frontend/backend
architecture consists of a backend data repository and
analysis computer system and a few frontend computer
systems dedicated for data collection and manufacturing
process verification. Because of the significant
software overhead in communication protocol and
database processing, information exchange was batched
between the backend and frontend systems to amortize
such costs and improve overall system performance.
Results of the experiments were analyzed to gain
quantitative insight on the feasibility of such
decomposition and its mapping onto the proposed
architecture. With sufficient batching, the proposed
distributed architecture not only has more overall
system capacity, but also is more cost-effective than
the typical centralized architecture. The approach
described is applicable in more general contexts.
Advantages of such distributed systems include the
relative robustness of the distributed architecture
under single point failure mode and the ease of
capacity growth by upgrading the computer systems
and/or by increasing the number of frontend systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Irgon:1988:FLS,
author = "A. E. Irgon and A. H. {Dragoni, Jr.} and T. O.
Huleatt",
title = "{FAST}: a large scale expert system for application
and system software performance tuning",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "151--156",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55613",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alexander:1988:CDC,
author = "W. Alexander and G. Copeland",
title = "Comparison of dataflow control techniques in
distributed data-intensive systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "157--166",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55614",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In dataflow architectures, each dataflow node (i.e.,
operation) is typically executed on a single physical
node. We are concerned with distributed data-intensive
systems, in which each base (i.e., persistent) set of
data has been declustered over many physical nodes to
achieve load balancing. Because of large base set size,
each operation is executed where the base set resides,
and intermediate results are transferred between
physical nodes. In such systems, each dataflow node is
typically executed on many physical nodes. Furthermore,
because computations are data-dependent, we cannot know
until run time which subset of the physical nodes
containing a particular base set will be involved in a
given dataflow node. This uncertainty affects program
loading, task activation and termination, and data
transfer among the nodes. In this paper we focus on the
problem of how a dataflow node in such an environment
knows when it has received data from all the physical
nodes from which it is ever going to receive. We call
this the dataflow control problem. The interesting part
of the problem is trying to achieve correctness
efficiently. We propose three solutions to this
problem, and compare them quantitatively by the metrics
of total message traffic, message system throughput and
data transfer response time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Leutenegger:1988:MVP,
author = "S. T. Leutenegger and M. K. Vernon",
title = "A mean-value performance analysis of a new
multiprocessor architecture",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "167--176",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55615",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a preliminary performance analysis
of a new large-scale multiprocessor: the Wisconsin
Multicube. A key characteristic of the machine is that
it is based on shared buses and a snooping cache
coherence protocol. The organization of the shared
buses and shared memory is unique and non-hierarchical.
The two-dimensional version of the architecture is
envisioned as scaling to 1024 processors. We develop an
approximate mean-value analysis of bus interference for
the proposed cache coherence protocol. The model
includes FCFS scheduling at the bus queues with
deterministic bus access times, and asynchronous memory
write-backs and invalidation requests. We use our model
to investigate the feasibility of the multiprocessor,
and to study some initial system design issues. Our
results indicate that a 1024-processor system can
operate at 75--95\% of its peak processing power, if
the mean time between cache misses is larger than 1000
bus cycles (i.e. 50 microseconds for 20 MHz buses; 25
microseconds for 40 MHz buses). This miss rate is not
unreasonable for the cache sizes specified in the
design, which are comparable to main memory sizes in
existing multiprocessors. We also present results which
address the issues of optimal cache block size, optimal
size of the two-dimensional Multicube, the effect of
broadcast invalidations on system performance, and the
viability of several hardware techniques for reducing
the latency for remote memory requests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blake:1988:SAR,
author = "J. T. Blake and A. L. Reibman and K. S. Trivedi",
title = "Sensitivity analysis of reliability and performability
measures for multiprocessor systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "177--186",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55616",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditional evaluation techniques for multiprocessor
systems use Markov chains and Markov reward models to
compute measures such as mean time to failure,
reliability, performance, and performability. In this
paper, we discuss the extension of Markov models to
include parametric sensitivity analysis. Using such
analysis, we can guide system optimization, identify
parts of a system model sensitive to error, and find
system reliability and performability bottlenecks. As
an example we consider three models of a 16-processor,
16-memory system. A network provides communication
between the processors and the memories. Two
crossbar-network models and the Omega network are
considered. For these models, we examine the
sensitivity of the mean time to failure, unreliability,
and performability to changes in component failure
rates. We use the sensitivities to identify bottlenecks
in the three system models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
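Editorial note on Blake:1988:SAR (above): the abstract describes parametric sensitivity analysis of Markov models. The short Python/NumPy sketch below illustrates the basic ingredient of such an analysis, computing the steady-state vector of a small CTMC and its derivative with respect to a failure rate. It is an editorial illustration of the standard linear-algebra formulation, not code from the paper; the two-state model and its rates are hypothetical.

# Editorial sketch (not from the paper): parametric sensitivity of the
# steady-state vector of a small CTMC, the core step of the kind of
# Markov-reward sensitivity analysis the abstract describes.
import numpy as np

def steady_state(Q):
    """Solve pi Q = 0, sum(pi) = 1 by replacing the last column with ones."""
    n = Q.shape[0]
    A = Q.copy()
    A[:, -1] = 1.0                      # normalization column
    b = np.zeros(n); b[-1] = 1.0
    return np.linalg.solve(A.T, b)

def steady_state_sensitivity(Q, dQ):
    """d(pi)/d(theta) from  dpi Q = -pi dQ  with  sum(dpi) = 0."""
    pi = steady_state(Q)
    A = Q.copy()
    A[:, -1] = 1.0
    rhs = -pi.dot(dQ)
    rhs[-1] = 0.0                       # derivative of the normalization is 0
    dpi = np.linalg.solve(A.T, rhs)
    return pi, dpi

if __name__ == "__main__":
    lam, mu = 1e-4, 1e-2                # hypothetical failure / repair rates
    # two-state availability model: state 0 = up, state 1 = down
    Q  = np.array([[-lam, lam], [mu, -mu]])
    dQ = np.array([[-1.0, 1.0], [0.0, 0.0]])   # derivative of Q w.r.t. lam
    pi, dpi = steady_state_sensitivity(Q, dQ)
    print("availability:", pi[0], " d(availability)/d(lambda):", dpi[0])

For this two-state example the analytic answers are mu/(lam+mu) and -mu/(lam+mu)^2, which the sketch reproduces; the paper applies the same idea to much larger processor-memory-network models and to performability measures.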
@Article{Mukkamala:1988:DPR,
author = "R. Mukkamala and S. C. Bruell and R. K. Shultz",
title = "Design of partially replicated distributed database
systems: an integrated methodology",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "187--196",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55617",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The objective of this research is to develop and
integrate tools for the design of partially replicated
distributed database systems. Many existing tools are
inappropriate for designing large-scale distributed
databases due to their large computational
requirements. Our goal is to develop tools that solve
the design problems reasonably quickly, typically by
using heuristic algorithms that provide approximate or
near-optimal solutions. In developing this design
methodology, we assume that information regarding the
types of user requests and their rates of arrival into
the system is known a priori. The methodology assumes a
general model for transaction execution. In this paper
we discuss three aspects of the design methodology: the
data allocation problem, the use of a static
load-balancing scheme in coordination with the
allocation scheme, and the design evaluation and review
step. Our methodology employs iterative design
techniques using performance evaluation as a means to
iterate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wybranietz:1988:MPM,
author = "D. Wybranietz and D. Haban",
title = "Monitoring and performance measuring distributed
systems during operation",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "197--206",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55618",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an integrated tool for monitoring
distributed systems continuously during operation. A
hybrid monitoring approach is used. As special hardware
support, a test and measurement processor (TMP) was
designed, which is part of each node in an experimental
multicomputer system. Each TMP runs local parts of the
monitoring software for its node, while all the TMPs
are connected to a central test station via a separate
TMP interconnection network. The monitoring system is
transparent to users. It permanently observes system
behavior, measures system performance and records
system information. The immense amount of information
is graphically displayed in easy-to-read charts and
graphs in an application-oriented manner. The tools
promote an improved understanding of run time behavior
and performance measurements to derive qualitative and
even quantitative assessments about distributed
systems. A prototype of the monitoring facility is
operational and currently experiments are being
conducted in our distributed system consisting of
several MC68000 microcomputers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Melvin:1988:UMI,
author = "S. W. Melvin and Y. N. Patt",
title = "The use of microcode instrumentation for development,
debugging and tuning of operating system kernels",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "207--214",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55619",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We have developed a tool based on microcode
modifications to a VAX 8600 which allows a wide variety
of operating system measurements to be taken with
minimal perturbation and without the need to modify any
operating system software. A trace of interrupts,
exceptions, system calls and context switches is
generated as a side-effect to normal execution. In this
paper we describe the tool we have developed and
present some results we have gathered under both UNIX
4.3 BSD and VAX/VMS V4.5. We compare the process fork
behavior of two different command shells under UNIX,
look at context switch rates for interactive and batch
workloads and generate a histogram for network
interrupt service time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agawal:1988:MRC,
author = "A. Agawal and A. Gupta",
title = "Memory-reference characteristics of multiprocessor
applications under {MACH}",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "215--225",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55620",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Shared-memory multiprocessors have received wide
attention in recent times as a means of achieving
high performance cost-effectively. Their viability
requires a thorough understanding of the memory access
patterns of parallel processing applications and
operating systems. This paper reports on the memory
reference behavior of several parallel applications
running under the MACH operating system on a
shared-memory multiprocessor. The data used for this
study is derived from multiprocessor address traces
obtained from an extended ATUM address tracing scheme
implemented on a 4-CPU DEC VAX 8350. The applications
include parallel OPS5, logic simulation, and a VLSI
wire routing program. Among the important issues
addressed in this paper are the amount of sharing in
user programs and in the operating system, comparing
the characteristics of user and system reference
patterns, sharing related to process migration, and the
temporal, spatial, and processor locality of shared
blocks. We also analyze the impact of shared references
on cache coherence in shared-memory multiprocessors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Murphy:1988:CPB,
author = "J. M. Murphy and R. B. Bunt",
title = "Characterising program behaviour with phases and
transitions",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "226--234",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55621",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A detailed quantitative study of program behaviour is
described. Reference strings from a representative set
of programs were decomposed into phases and
transitions. Referencing behaviour is studied at both
the macro level (program-wide) and the micro level
(within the phases and transitions). Quantitative data,
suitable for the parameterization of program behaviour
models, is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yoshizawa:1988:ASC,
author = "Y. Yoshizawa and T. Arai",
title = "Adaptive storage control for page frame supply in
large scale computer systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "235--243",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55622",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A real storage management algorithm called Adaptive
Control of Page-frame Supply (ACPS) is described. ACPS
employs three strategies: prediction of the demand
for real page frames, page replacement based on the
prediction, and working set control. Together, these
strategies constitute the real page frame allocation
method, and contribute to short and stable response
times in conversational processing environments. ACPS
is experimentally applied to the VOS3 operating system.
Evaluation of ACPS on a real machine shows that TSS
response times are not affected too strongly by
king-size jobs and ACPS is successful in avoiding
paging delay and thrashing. ACPS prevents extreme
shortages of real storage in almost all cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pattipati:1988:PAM,
author = "K. R. Pattipati and M. M. Kostreva",
title = "On the properties of approximate mean value analysis
algorithms for queueing networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "244--252",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55623",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents new formulations of the
approximate mean value analysis (MVA) algorithms for
the performance evaluation of closed product-form
queueing networks. The key to the development of the
algorithms is the derivation of vector nonlinear
equations for the approximate network throughput. We
solve this set of throughput equations using a
nonlinear Gauss--Seidel type distributed algorithm,
coupled with a quadratically convergent Newton's method
for scalar nonlinear equations. The throughput
equations have enabled us to: (a) derive bounds on the
approximate throughput; (b) prove the existence,
uniqueness, and convergence of the Schweitzer--Bard
(S-B) approximation algorithm for a wide class of
monotone, single class networks; (c) establish the
existence of the S-B solution for multi-class, monotone
networks; and (d) prove the asymptotic (i.e., as the
number of customers of each class tends to {\infty})
uniqueness of the S-B throughput solution, and the
asymptotic convergence of the various versions of the
distributed algorithms in multi-class networks with
single server and infinite server nodes. The asymptotic
convergence is established using results from convex
programming and convex duality theory. Extension of our
algorithms to mixed networks is straightforward. Only
multi-class results are presented in this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
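Editorial note on Pattipati:1988:PAM (above): the abstract studies the Schweitzer--Bard (S-B) approximate MVA fixed point. The Python sketch below shows the classical single-class S-B iteration that this analysis concerns, in its usual textbook form; it is not the paper's throughput-equation reformulation or its Gauss--Seidel/Newton solver, and the service demands and population are hypothetical.

# Editorial sketch: classical single-class Schweitzer--Bard approximate MVA
# for a closed product-form network of queueing stations.
def schweitzer_bard(service_demands, N, tol=1e-10, max_iter=10000):
    """service_demands[k] = visit ratio * mean service time at station k."""
    M = len(service_demands)
    L = [N / M] * M                       # initial guess: customers spread evenly
    for _ in range(max_iter):
        # Schweitzer approximation: queue seen on arrival ~ (N-1)/N * L_k
        R = [d * (1.0 + (N - 1) / N * L[k]) for k, d in enumerate(service_demands)]
        X = N / sum(R)                    # throughput, Little's law on the cycle
        L_new = [X * r for r in R]        # per-station queue lengths, Little's law
        if max(abs(a - b) for a, b in zip(L_new, L)) < tol:
            return X, L_new
        L = L_new
    return X, L

if __name__ == "__main__":
    X, L = schweitzer_bard([0.2, 0.4, 0.1], N=10)
    print("approximate throughput:", X)
    print("approximate queue lengths:", L)

The bounds, existence, and uniqueness results in the entry above apply to the fixed point that this iteration converges to.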
@Article{Tantawi:1988:OAM,
author = "A. N. Tantawi and G. Towsley and J. Wolf",
title = "Optimal allocation of multiple class resources in
computer systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "253--260",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55624",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A class-constrained resource allocation problem is
considered. In this problem, a set of $M$ heterogeneous
resources is to be allocated optimally among a set of
$L$ users belonging to $K$ user classes. A set of class
allocation constraints, which limit the number of users
of a given class that could be allocated to a given
resource, is imposed. An algorithm with worst case time
complexity $ O(M (L M + M^2 + L K))$ is presented along
with a proof of its correctness. This problem arises in
many areas of resource management in computer systems,
such as load balancing in distributed systems,
transaction processing in distributed database systems,
and session allocation in time-shared computer systems.
We illustrate the behavior of this algorithm with an
example where file servers are to be allocated to
workstations of multiple classes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hsieh:1988:PNA,
author = "C.-H. Hsieh and S. S. Lam",
title = "{PAM} --- a noniterative approximate solution method
for closed multichain queueing networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "261--269",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55625",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Approximate MVA algorithms for separable queueing
networks are based upon an iterative solution of a set
of modified MVA formulas. Although each iteration has a
computational time requirement of $ O(M K^2) $ or less,
many iterations are typically needed for convergence to
a solution. ($M$ denotes the number of queues and $K$
the number of closed chains or customer classes.) We
present some faster approximate solution algorithms
that are noniterative. They are suitable for the
analysis and design of communication networks which may
require tens to hundreds, perhaps thousands, of closed
chains to model flow-controlled virtual channels. Three
PAM algorithms of increasing accuracy are presented.
Two of them have time and space requirements of $ O(M
K)$. The third algorithm has a time requirement of $
O(M K^2)$ and a space requirement of $ O(M K)$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1989:LBD,
author = "Anna Ha{\'c}",
title = "Load balancing in distributed systems: a summary",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "17--19",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most distributed systems are characterized by
distribution of both physical and logical features. The
architecture of a distributed system is usually
modular. Most distributed systems support a varying
number of processing elements. The system hardware,
software, data, user software and user data are
distributed across the system. An arbitrary number of
system and user processes can be executed on various
machines in the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1989:KBD,
author = "Anna Ha{\'c}",
title = "Knowledge-based distributed system architecture",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "20--20",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper introduces the learning system, the expert
system and an information broadcasting protocol for
designing and managing distributed systems. A
knowledge-based system can be implemented as a part of
operating system software to make decisions about
process transfer and message routing in a hierarchical
network. A knowledge-based system uses dynamic
information about the state of processors and
applications in the local and wide area network. This
information consists of processors' and applications'
queue lengths, and it is broadcast to directly
connected processors. The expert system uses broadcast
information to make decisions about process transfer
and message routing, considering processor availability
and system security. The expert system causes
processors' queue lengths to become balanced on each
network hierarchy level. The number of process
transfers is calculated and depends on network
partitioning and the threshold values used by the
expert system. The convergence of the algorithms for
the knowledge-based system is proven. Performance of
the proposed system is evaluated analytically using the
elapsed time of process transfer or message transfer
and the waiting time to begin transfer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1989:DAA,
author = "Anna Ha{\'c}",
title = "Design algorithms for asynchronous operations in cache
memory",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "21--21",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The algorithms used to operate on disk buffer cache
memory have significant impact on operating system
performance. The buffer cache size, the size of the
file being written, the disk access time, and the
algorithms used to append updated blocks from the
buffer cache to the disk queue determine performance of
operations in disk cache memory. The determination of
these algorithms is particularly important since they
are implemented in the system kernel and cannot be
changed by the user or system administrator. This paper
introduces new algorithms for asynchronous operations
in disk buffer cache memory. These algorithms allow for
writing the files into the buffer cache by the
processes. The number of active processes in the system
and the length of the queue to the disk buffer cache
are considered in the algorithm design. This
information is obtained dynamically during the
execution of the algorithms. The performance of the
operations on the buffer cache is improved by using the
algorithms, which allow for writing the contents of the
buffer cache to the disk depending on the system load
and the write activity. The elapsed time of writing a
file into the buffer cache is calculated. The waiting
time to start writing a file is also considered. It is
shown that the elapsed time of writing a file decreases
by using the algorithms, which write the blocks to the
disk depending on the rate of write operations and the
number of active processes in the system. The time for
a block to become available for update in the buffer
cache is given. The number of blocks available for
update in the buffer cache is derived. The performance
of the algorithms is compared. It is shown that the
proposed algorithms allow for better performance than
an algorithm that does not use the information about
the system load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schneider:1989:AHS,
author = "Victor Schneider",
title = "Approximations for the {Halstead} software science
software error rate and project effort estimators",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "22--29",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Experimental estimators are presented relating the
expected number of software errors ($B$) in a software
development project to\par
$ \bullet $ the overall reported months of programmer
effort for the project $ (E)$ \par
$ \bullet $ the number of subprograms $ (n)$ \par
$ \bullet $ the count of thousands of coded source
statements $ (S)$.\par
These estimators are $ B \approx 7.6 E^{0.667}
S^{0.333}$ and $ B \approx n ((S / n) /
0.047)^{1.667}$.\par
These estimators are shown to be consistent with data
obtained from the Air Force Rome Air Development
Center, the Naval Research Laboratory, and Fujitsu
Corporation. It is suggested here that more data is
needed to refine these estimators further.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
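Editorial note on Schneider:1989:AHS (above): the abstract states two closed-form estimators for the expected number of software errors B. The short Python sketch below simply evaluates both expressions; the project figures (E, S, n) are hypothetical inputs chosen for illustration, not data from the cited sources.

# Editorial sketch: evaluating the two error estimators quoted in the abstract.
# E = reported programmer-months, S = thousands of coded source statements,
# n = number of subprograms (all values below are hypothetical).
def errors_from_effort(E, S):
    return 7.6 * E ** 0.667 * S ** 0.333          # B ~ 7.6 E^0.667 S^0.333

def errors_from_size(n, S):
    return n * ((S / n) / 0.047) ** 1.667         # B ~ n ((S/n)/0.047)^1.667

if __name__ == "__main__":
    E, S, n = 100.0, 20.0, 400        # hypothetical project figures
    print("estimate from effort and size:", round(errors_from_effort(E, S)))
    print("estimate from subprogram size:", round(errors_from_size(n, S)))

For these particular inputs the two estimators happen to give similar values (roughly 440 errors), illustrating how they can be cross-checked against each other on a real project.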
@Article{Domanski:1989:PBE,
author = "Bernard Domanski",
title = "A {PROLOG}-based expert system for tuning {MVS\slash
XA}",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "30--47",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041916",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper will discuss some of the issues involved in
building an Expert System that embodies tuning rules
for IBM's MVS/XA operating system. To understand the
components of an Expert System and their functions,
PROLOG on an IBM PC (Turbo-PROLOG from Borland
International) was chosen as the development
environment. The paper will begin by defining the key
concepts about Expert Systems, Knowledge Engineering,
and Knowledge Acquisition. The reader will be given a
brief overview of PROLOG, from which we can explain how
an inference mechanism was developed. Finally, the
paper will describe the Expert System that was
developed, and additionally will provide a set of key
issues that should be addressed in the future. It is
our overall objective to provide new insight into the
application of AI to CPE.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Irvin:1989:QML,
author = "David R. Irvin",
title = "A queueing model for local area network bridges",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "48--57",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The buffer needed to match the transmission speeds of
two different local area networks interconnected by a
MAC-layer bridge is modeled as a G/M/1 queue. To
account for the problems caused by the arrivals of
traffic bursts from the higher-speed network, traffic
interarrival times are assumed to follow a
hyperexponential probability density function.
Selecting parameters for the hyperexponential
distribution to model realistic traffic conditions is
examined. A hypothetical bridge is discussed as an
example. Queue length for the G/M/1 system with
hyperexponential interarrivals is shown to depend
primarily on the persistence of bursts on the
higher-speed network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
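Editorial note on Irvin:1989:QML (above): the abstract models the bridge buffer as a G/M/1 queue with hyperexponential interarrivals. The Python sketch below applies the standard textbook G/M/1 results to that setting, solving the fixed point sigma = A*(mu(1 - sigma)) for an H2 interarrival distribution; the arrival and service parameters are hypothetical and the paper's parameter-selection procedure is not reproduced.

# Editorial sketch: textbook G/M/1 queue metrics with hyperexponential (H2)
# interarrival times, the model described in the abstract above.
def h2_lst(s, p1, l1, l2):
    """Laplace--Stieltjes transform of an H2 interarrival distribution."""
    p2 = 1.0 - p1
    return p1 * l1 / (l1 + s) + p2 * l2 / (l2 + s)

def gm1_sigma(mu, p1, l1, l2, iters=200):
    """Fixed point of sigma = A*(mu (1 - sigma)), the G/M/1 root in (0, 1)."""
    sigma = 0.5
    for _ in range(iters):
        sigma = h2_lst(mu * (1.0 - sigma), p1, l1, l2)
    return sigma

if __name__ == "__main__":
    mu = 1.0                         # bridge forwarding rate (packets per unit time)
    p1, l1, l2 = 0.1, 10.0, 0.5      # hypothetical bursty H2 arrival parameters
    lam = 1.0 / (p1 / l1 + (1 - p1) / l2)
    sigma = gm1_sigma(mu, p1, l1, l2)
    print("utilization rho =", lam / mu, " sigma =", sigma)
    print("mean queue seen by an arrival =", sigma / (1.0 - sigma))
    print("mean waiting time in buffer   =", sigma / (mu * (1.0 - sigma)))

As the abstract notes, the queue length depends primarily on the burstiness of the higher-speed network; in this sketch that burstiness enters through the H2 branch probabilities and rates.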
@Article{Wolf:1989:POP,
author = "J. Wolf",
title = "The placement optimization program: a practical
solution to the disk file assignment problem",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "1--10",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75373",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe a practical mathematical
formulation and solution of the so-called ``File
Assignment Problem'' (FAP) for computer disks. Our FAP
solution has been implemented in a PL/I program known
as the Placement Optimization Program (POP). The
algorithm consists of three major components --- two
heuristic optimization models and a queueing network
model. POP has been used in validation studies to
assign files to disks in two IBM MVS complexes. The
resulting savings in I/O response times were 22\% and
25\%, respectively. Throughout the paper we shall
emphasize the real-world nature of our approach to the
disk FAP, which we believe sets it apart from previous
attempts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kearns:1989:DDR,
author = "J. P. Kearns and S. DeFazio",
title = "Diversity in database reference behavior",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "11--19",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75374",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Over the past fifteen years, empirical studies of the
reference behavior of a number of database systems have
produced seemingly contradictory results. The presence
or absence of locality of reference and sequentiality
have both been reported (or denied) in various papers.
As such, the performance analyst or database
implementor is left with little concrete guidance in
the form of expected reference behavior of a database
system under a realistic workload. We present empirical
evidence that all of the previous results about
database reference behavior are correct (or incorrect).
That is, if the database reference sequence is viewed
on a per-transaction instance or per-database basis,
almost any reference behavior is discernible. Previous
results which report the absolute absence or presence
of a certain form of reference behavior were almost
certainly derived from reference traces which were
dominated by transactions or databases which exhibited
a certain behavior. Our sample consists of roughly
twenty-five million block references, from 350,000
transaction executions, directed at 175 operational
on-line databases at two major corporations. As such,
the sample is an order of magnitude more comprehensive
than any other reported in the literature. We also
present evidence that reference behavior is predictable
and exploitable when viewed on a per-transaction basis
or per-database basis. The implications of this
predictability for effective buffer management are
discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerstein:1989:SAD,
author = "J. Hellerstein",
title = "A statistical approach to diagnosing intermittent
performance-problems using monotone relationships",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "20--28",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75375",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Managing a computer system requires that good
performance (e.g., large throughputs, small response
times) be maintained in order to meet business
objectives. Rarely is performance consistently bad.
More frequently, performance is good one day and bad
the next. Diagnosing such intermittent
performance-problems involves determining what
distinguishes bad days from good days, such as larger
paging rates. Once this is understood, an appropriate
remedy can be found, such as buying more memory. This
paper describes a statistical approach to diagnosing
intermittent performance-problems when the
relationships among measurement variables are expressed
qualitatively as monotone relationships (e.g., paging
delays increase with the number of logged-on users). We
present a non-parametric test for monotonicity (NTM)
that evaluates monotone relationships based on FA, the
fraction of observation-pairs that agree with the
monotone relationship. An interpretation of FA in terms
of statistical significance levels is presented, and
NTM is compared to least-squares regression. Based on
NTM, an algorithm for diagnosing intermittent
performance-problems is presented. NTM and our
diagnosis algorithm are applied to measurements of four
similarly configured IBM 9370 model 60s running IBM's
operating-system Virtual Machine System Product (VM
SP).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
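Editorial note on Hellerstein:1989:SAD (above): the abstract defines FA as the fraction of observation-pairs that agree with a hypothesized monotone relationship. The Python sketch below computes FA for a small hypothetical data set; the tie handling is a simplification and the paper's significance-level interpretation of FA is not reproduced.

# Editorial sketch: FA, the fraction of observation-pairs agreeing with the
# monotone hypothesis "y increases with x" (data below are hypothetical).
from itertools import combinations

def fraction_agreeing(x, y):
    pairs = [(i, j) for i, j in combinations(range(len(x)), 2) if x[i] != x[j]]
    agree = sum(1 for i, j in pairs
                if y[i] != y[j] and (x[i] < x[j]) == (y[i] < y[j]))
    return agree / len(pairs)

if __name__ == "__main__":
    logged_on_users = [10, 25, 40, 55, 70, 85]       # hypothetical daily samples
    paging_delay_ms = [3.0, 4.1, 3.9, 6.5, 7.2, 9.0]
    print("FA =", fraction_agreeing(logged_on_users, paging_delay_ms))

A value of FA near 1 supports the monotone relationship (here 14 of 15 pairs agree, FA approximately 0.93); values near 0.5 suggest no monotone dependence.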
@Article{Muntz:1989:BAR,
author = "R. R. Muntz and E. {de Souza e Silva} and A. Goyal",
title = "Bounding availability of repairable computer systems",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "29--38",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75376",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markov models are widely used for the analysis of
availability of computer/communication systems.
Realistic models often involve state space
cardinalities that are so large that it is impractical
to generate the transition rate matrix let alone solve
for availability measures. Various state space
reduction methods have been developed, particularly for
transient analysis. In this paper we present an
approximation technique for determining steady state
availability. Of particular interest is that the method
also provides bounds on the error. Examples are given
to illustrate the method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bubenik:1989:POM,
author = "R. Bubenik and W. Zwaenepoel",
title = "Performance of optimistic make",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "39--48",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75377",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Optimistic make is a version of make that executes the
commands necessary to bring targets up-to-date prior to
the time the user types a make request. Side effects of
these optimistic computations (such as file or screen
updates) are concealed until the make request is
issued. If the inputs read by the optimistic
computations are identical to the inputs the
computation would read at the time the make request is
issued, the results of the optimistic computations are
used immediately, resulting in improved response time.
Otherwise, the necessary computations are reexecuted.
We have implemented optimistic make in the V-System on
a collection of SUN-3 workstations. Statistics
collected from this implementation are used to
synthesize a workload for a discrete-event simulation
and to validate its results. The simulation shows a
speedup distribution over pessimistic make with a
median of 1.72 and a mean of 8.28. The speedup
distribution is strongly dependent on the ratio between
the target out-of-date times and the command execution
times. In particular, with faster machines the median
of the speedup distribution grows to 5.1, and then
decreases again. The extra machine resources used by
optimistic make are well within the limit of available
resources, given the large idle times observed in many
workstation environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anderson:1989:PIT,
author = "T. E. Anderson and D. D. Lazowska and H. M. Levy",
title = "The performance implications of thread management
alternatives for shared-memory multiprocessors",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "49--60",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75378",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Threads (``lightweight'' processes) have become a
common element of new languages and operating systems.
This paper examines the performance implications of
several data structure and algorithm alternatives for
thread management in shared-memory multiprocessors.
Both experimental measurements and analytical model
projections are presented. For applications with
fine-grained parallelism, small differences in thread
management are shown to have significant performance
impact, often posing a tradeoff between throughput and
latency. Per-processor data structures can be used to
improve throughput, and in some circumstances to avoid
locking, improving latency as well. The method used by
processors to queue for locks is also shown to affect
performance significantly. Normal methods of critical
resource waiting can substantially degrade performance
with moderate numbers of waiting processors. We present
an Ethernet-style backoff algorithm that largely
eliminates this effect.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
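Editorial note on Anderson:1989:PIT (above): the abstract mentions an Ethernet-style backoff algorithm for processors waiting on locks. The Python sketch below illustrates the general idea with a test-and-test-and-set spin lock that backs off for a randomized, exponentially growing interval after each failed acquisition attempt. Python's threading.Lock merely stands in for the hardware atomic test-and-set; the class, constants, and structure are illustrative assumptions, not the authors' implementation.

# Editorial sketch: test-and-test-and-set spin lock with bounded exponential
# ("Ethernet-style") backoff to reduce contention among waiters.
import random
import threading
import time

class BackoffSpinLock:
    def __init__(self, base_delay=1e-6, max_delay=1e-3):
        self._flag = False
        self._atomic = threading.Lock()    # models the atomic test-and-set instruction
        self._base, self._max = base_delay, max_delay

    def _test_and_set(self):
        with self._atomic:
            was_held, self._flag = self._flag, True
            return was_held

    def acquire(self):
        delay = self._base
        while True:
            while self._flag:              # spin on a plain read (the "test" phase)
                pass
            if not self._test_and_set():   # attempt the atomic operation
                return
            time.sleep(random.uniform(0, delay))   # back off after a failed attempt
            delay = min(delay * 2, self._max)      # double the window, up to a cap

    def release(self):
        self._flag = False

if __name__ == "__main__":
    lock, counter = BackoffSpinLock(), [0]
    def worker():
        for _ in range(200):
            lock.acquire()
            counter[0] += 1
            lock.release()
    threads = [threading.Thread(target=worker) for _ in range(4)]
    for t in threads: t.start()
    for t in threads: t.join()
    print("counter =", counter[0])         # expect 800

The randomized, growing backoff window spreads retries out in time, which is the effect the entry above credits with largely eliminating the degradation seen under moderate numbers of waiting processors.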
@Article{Carter:1989:OIB,
author = "J. B. Carter and W. Zwaenepoel",
title = "Optimistic implementation of bulk data transfer
protocols",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "61--69",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75379",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During a bulk data transfer over a high speed network,
there is a high probability that the next packet
received from the network by the destination host is
the next packet in the transfer. An optimistic
implementation of a bulk data transfer protocol takes
advantage of this observation by instructing the
network interface on the destination host to deposit
the data of the next packet immediately into its
anticipated final location. No copying of the data is
required in the common case, and overhead is greatly
reduced. Our optimistic implementation of the V kernel
bulk data transfer protocols on SUN-3/50 workstations
connected by a 10 megabit Ethernet achieves peak
process-to-process data rates of 8.3 megabits per
second for 1-megabyte transfers, and 6.8 megabits per
second for 8-kilobyte transfers, compared to 6.1 and
5.0 megabits per second for the pessimistic
implementation. When the reception of a bulk data
transfer is interrupted by the arrival of unexpected
packets at the destination, the worst-case performance
of the optimistic implementation is only 15 percent
less than that of the pessimistic implementation.
Measurements and simulation indicate that for a wide
range of load conditions the optimistic implementation
outperforms the pessimistic implementation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stunkel:1989:TPT,
author = "C. B. Stunkel and W. K. Fuchs",
title = "{TRAPEDS}: producing traces for multicomputers via
execution driven simulation",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "70--78",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75380",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Trace-driven simulation is an important aid in
performance analysis of computer systems. Capturing
address traces for these simulations is a difficult
problem for single processors and particularly for
multicomputers. Even when existing trace methods can be
used on multicomputers, the amount of collected data
typically grows with the number of processors, so I/O
and trace storage costs increase. A new technique is
presented in this paper which modifies the executable
code to dynamically collect the address trace from the
user code and analyzes this trace during the execution
of the program. This method helps resolve the I/O and
storage problems and facilitates parallel analysis of
the address trace. If a trace stored on disk is
desired, the generated trace information can also be
written to files during execution, with a resultant
drop in program execution speed. An initial
implementation on the Intel iPSC/2 hypercube
multicomputer is detailed, and sample simulation
results are presented. The effect of this trace
collection method on execution time is illustrated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gallivan:1989:BCM,
author = "K. Gallivan and D. Gannon and W. Jalby and A. Malony
and H. Wijshoff",
title = "Behavioral characterization of multiprocessor memory
systems: a case study",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "79--88",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75381",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The speed and efficiency of the memory system is a key
limiting factor in the performance of supercomputers.
Consequently, one of the major concerns when developing
a high-performance code, either manually or
automatically, is determining and characterizing the
influence of the memory system on performance in terms
of algorithmic parameters. Unfortunately, the
performance data available to an algorithm designer,
such as various benchmarks and, occasionally,
manufacturer-supplied information (e.g., instruction
timings and architecture component characteristics), are
rarely sufficient for this task. In this paper, we
discuss a systematic methodology for probing the
performance characteristics of a memory system via a
hierarchy of data-movement kernels. We present and
analyze the results obtained by such a methodology on a
cache-based multi-vector processor (Alliant FX/8).
Finally, we indicate how these experimental results can
be used for predicting the performance of simple
Fortran codes by a combination of empirical
observations, architectural models and analytical
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Samples:1989:MNL,
author = "A. D. Samples",
title = "{Mache}: no-loss trace compaction",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "89--97",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75382",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Execution traces can be significantly compressed using
their referencing locality. A simple observation leads
to a technique capable of compressing execution traces
by an order of magnitude; instruction-only traces are
compressed by two orders of magnitude. This technique
is unlike previously reported trace compression
techniques in that it compresses without loss of
information and, therefore, does not affect
trace-driven simulation time or accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:1989:ERS,
author = "A. Mukherjee and L. H. Landweber and J. C.
Strikwerda",
title = "Evaluation of retransmission strategies in a local
area network environment",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "98--107",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present an evaluation of retransmission strategies
over local area networks. Expressions are derived for
the expectation and the variance of the transmission
time of the go-back-n and the selective repeat
protocols in the presence of errors. These are compared
to the expressions for blast with full retransmission
on error (BFRE) derived by Zwaenepoel [Zwa 85]. We
conclude that go-back-n performs almost as well as
selective repeat and is very much simpler to implement
while BFRE is stable only for a limited range of
message sizes and error rates. We also present a
variant of BFRE which optimally checkpoints the
transmission of a large message. This is shown to
overcome the instability of ordinary BFRE. It has a
simple state machine and seems to take full advantage
of the low error rates of local area networks. We
further investigate go-back-n by generalizing the
analysis to an upper layer transport protocol, which is
likely to encounter, among other things, variable delays
due to protocol overhead, multiple connections, process
switches and operating system scheduling priorities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
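Editorial note on Mukherjee:1989:ERS (above): the abstract compares go-back-n, selective repeat, and blast with full retransmission on error (BFRE). The Python sketch below evaluates the standard first-order textbook approximations for the expected number of packet transmissions of each strategy; these are editorial illustrations, not the paper's exact expectation and variance expressions, and the error probability and pipeline depth are hypothetical.

# Editorial sketch: first-order expected packet-transmission counts for an
# m-packet message with per-packet error probability p and pipeline depth n.
def selective_repeat(m, p):
    return m / (1 - p)                       # only damaged packets are resent

def go_back_n(m, p, n):
    return m * (1 - p + n * p) / (1 - p)     # each error drags ~n packets with it

def bfre(m, p):
    return m / (1 - p) ** m                  # whole message resent on any error

if __name__ == "__main__":
    p, n = 1e-3, 8
    print(" m   sel-repeat   go-back-n        BFRE")
    for m in (8, 64, 512, 4096):
        print(m, round(selective_repeat(m, p), 1),
                 round(go_back_n(m, p, n), 1),
                 round(bfre(m, p), 1))

Even these rough formulas reproduce the qualitative conclusions of the entry: go-back-n stays close to selective repeat at local-area error rates, while the BFRE cost grows roughly as (1-p)^(-m) and therefore blows up for large messages.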
@Article{Danzig:1989:FBF,
author = "P. B. Danzig",
title = "Finite buffers for fast multicast",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "108--117",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75384",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When many or all of the recipients of a multicast
message respond to the multicast's sender, their
responses may overflow the sender's available buffer
space. Buffer overflow is a serious, known problem of
broadcast-based protocols, and can be troublesome when
as few as three or four recipients respond. We develop
analytical models that calculate the expected number of
buffer overflows that can be used to estimate the
number of buffers necessary for an application. The
common cure for buffer overflow requires that
recipients delay their responses by some random amount
of time in order to increase the minimum spacing
between response messages, eliminate collisions on the
network, and decrease the peak processing demand at the
sender. In our table driven algorithm, the sender tries
to minimize the multicast's latency, the elapsed time
between its initial transmission of the multicast and
its reception of the final response, given the number
of times (rounds) it is willing to retransmit the
multicast. It includes in the multicast the time
interval over which it anticipates receiving the
response, the round timeout. We demonstrate that the
latency of single round multicasts exceeds the latency
of multiple round multicasts. We show how recipients
minimize the sender's buffer overflows by independently
choosing their response times as a function of the
round's timeout, sender's buffer size, and the number
of other recipients.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:1989:PDB,
author = "B. Mukherjee",
title = "Performance of a dual-bus unidirectional broadcast
network operating under probabilistic scheduling
strategy",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "118--126",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75385",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent advances in fiber optic technology (viz. its
promise to provide information-carrying capacity in the
Gbps range over long repeater-free distances) have
triggered tremendous activity in the study of
unidirectional bus networks (because signal flow in the
fiber is unidirectional). A popular network structure
that has received significant attention is the Dual-bus
Unidirectional Broadcast System (DUBS) network
topology. Most of the access mechanisms studied on this
structure are based on round-robin scheduling (or some
variation thereof). However since round-robin schemes
suffer a loss of channel capacity because of their
inter-round overhead (which can be significant for long
high-speed buses), a probabilistic scheduling strategy,
called pi-persistent protocol, has recently been
proposed and studied for single channel unidirectional
bus systems. Our concern here is to apply this
probabilistic scheduling strategy to each bus in DUBS,
and study the corresponding network performance. In so
doing, we allow stations to buffer multiple packets,
represent a station's queue size by a Markov chain
model, and employ an independence assumption. We find
that the average packet delay is bounded and the
maximum network throughput approaches two pkt/slot with
increasing buffer size. Further, the protocol's
performance is insensitive to bus characteristics, and
it appears to be particularly well suited for
fiber-optic network applications requiring long
distances and high bandwidth. Simulation results, which
verify the analytical model, are also included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Greenberg:1989:SCP,
author = "A. G. Greenberg and J. McKenna",
title = "Solution of closed, product form, queueing networks
via the {RECAL} and tree-{RECAL} methods on a shared
memory multiprocessor",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "127--135",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75386",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "RECAL is a new recurrence relation for calculating the
partition function and various queue length moments for
closed, product form networks. In this paper we discuss
a number of the issues involved in the software
implementation of RECAL on both sequential computers
and parallel, shared memory computers. After a brief
description of RECAL, we describe software implementing
RECAL on a sequential computer. In particular, we
discuss the problems involved in indexing and data
storage. Next we describe code implementing RECAL on a
parallel, shared memory computer. Special attention is
given to designing a special buffer for temporary data
storage and several other important features of the
parallel code. Finally, we touch on software for serial
and parallel implementations of a tree algorithm for
RECAL.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Paterok:1989:FQP,
author = "M. Paterok and O. Fischer and L. Opta",
title = "Feedback queues with preemption-distance priorities",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "136--145",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75387",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The method of moments is used to derive exact
analytical solutions for an open priority queueing
system with preemption-distance priorities and
feedback. Customers enter from outside in a Poisson
stream. They can feed back several times, changing
priorities and service demands in an arbitrary manner.
During feedback they can fork and branch according to
user-defined probabilities. The service demands of the
different classes are pairwise independent and can be
arbitrarily distributed. A customer who has been
interrupted resumes his service from the point where he
was interrupted (preemptive resume). A system of linear
equations is to be solved to obtain the mean sojourn
times of each customer class in the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wagner:1989:PSQ,
author = "D. B. Wagner and E. D. Lazowska",
title = "Parallel simulation of queueing networks: limitations
and potentials",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "146--155",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75388",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper concerns the parallel simulation of
queueing network models (QNMs) using the conservative
(Chandy--Misra) paradigm. Most empirical studies of
conservative parallel simulation have used QNMs as
benchmarks. For the most part, these studies concluded
that the conservative paradigm is unsuitable for
speeding up the simulation of QNMs, or that it is only
suitable for simulating a very limited subclass of
these models (e.g., those containing only FCFS
servers). In this paper we argue that these are
unnecessarily pessimistic conclusions. On the one hand,
we show that the structure of some QNMs inherently
limits the attainable simulation speedup. On the other
hand, we show that QNMs without such limitations can be
efficiently simulated using some recently introduced
implementation techniques. We present an analytic
method for determining an upper bound on speedup, and
use this method to identify QNM structures that will
exhibit poor simulation performance. We then survey a
number of promising implementation techniques, some of
which are quite general in nature and others of which
apply specifically to QNMs. We show how to extend the
latter to a larger class of service disciplines than
had been considered previously.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitra:1989:CCP,
author = "D. Mitra and I. Mitrani",
title = "Control and coordination policies for systems with
buffers",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "156--164",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75389",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study systems consisting of a number of service
cells in tandem, each containing a finite buffer.
Several policies governing the operation of such
systems are described and compared. These include
traditional and novel blocking schemes, with
applications to computer communications and production
lines. In particular, it is shown that kanban, a novel
discipline for coordinating cells in a manufacturing
context, is obtained by combining two, more basic,
concepts: a blocking policy introduced here as minimal
blocking, and shared buffers. The Kanban discipline is
superior in terms of throughput to the ordinary
transfer blocking policy. A method for analyzing
approximately the performance of the Kanban system is
also presented. This is based on examining first a
single cell in isolation and then combining the
isolated cells through fixed-point equations. Some
numerical results and comparisons with simulations are
included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicol:1989:AMP,
author = "D. M. Nicol and J. C. Townsend",
title = "Accurate modeling of parallel scientific
computations",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "165--170",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75390",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scientific codes are usually parallelized by
partitioning a grid among processors. To achieve top
performance it is necessary to partition the grid so as
to balance workload and minimize
communication/synchronization costs. This problem is
particularly acute when the grid is irregular, changes
over the course of the computation, and is not known
until load-time. Critical mapping and remapping
decisions rest on our ability to accurately predict
performance, given a description of a grid and its
partition. This paper discusses one approach to this
problem, and illustrates its use on a one-dimensional
fluids code. The models we construct are shown
empirically to be accurate, and are used to find
optimal remapping schedules.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sevcik:1989:CPA,
author = "K. C. Sevcik",
title = "Characterizations of parallelism in applications and
their use in scheduling",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "171--180",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75391",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As multiprocessors with large numbers of processors
become more prevalent, we face the task of developing
scheduling algorithms for the multiprogrammed use of
such machines. The scheduling decisions must take into
account the number of processors available, the overall
system load, and the ability of each application
awaiting activation to make use of a given number of
processors. The parallelism within an application can
be characterized at a number of different levels of
detail. At the highest level, it might be characterized
by a single parameter (such as the proportion of the
application that is sequential, or the average number
of processors the application would use if an unlimited
number of processors were available). At the lowest
level, representing all the parallelism in the
application requires the full data dependency graph
(which is more information than is practically
manageable). In this paper, we examine the quality of
processor allocation decisions under multiprogramming
that can be made with several different high-level
characterizations of application parallelism. We
demonstrate that decisions based on parallelism
characterizations with two to four parameters are
superior to those based on single-parameter
characterizations (such as fraction sequential or
average parallelism). The results are based
predominantly on simulation, with some guidance from a
simple analytic model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
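The single-parameter characterizations named in the Sevcik abstract above (fraction sequential,
average parallelism) reduce to a small calculation over a parallelism profile. A minimal Python
sketch using an invented profile, not data from the paper; "fraction sequential" is read here as
the fraction of elapsed time with exactly one runnable task, which is one plausible reading:

    # Invented parallelism profile: seconds spent while exactly p tasks
    # were runnable, assuming an unlimited number of processors.
    profile = {1: 4.0, 2: 1.0, 8: 2.0}

    total_work = sum(p * t for p, t in profile.items())   # processor-seconds = 22
    elapsed = sum(profile.values())                        # wall-clock seconds = 7

    average_parallelism = total_work / elapsed             # 22/7, about 3.14
    fraction_sequential = profile.get(1, 0.0) / elapsed    # 4/7, about 0.57

    print(average_parallelism, fraction_sequential)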
@Article{Nelson:1989:ART,
author = "R. D. Nelson and T. K. Philips",
title = "An approximation to the response time for shortest
queue routing",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "181--189",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75392",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we derive an approximation for the mean
response time of a multiple queue system in which
shortest queue routing is used. We assume there are $K$
identical queues with infinite capacity and service
times that are exponentially distributed. Arrivals of
jobs to this system are Poisson and are routed to a
queue of minimal length. We develop an approximation
which is based on both theoretical and experimental
considerations and, for $ K \leq 8$, has a relative
error of less than one half of one percent when
compared to simulation. For $ K = 16$, the relative
error is still acceptable, being less than 2 percent.
An application to a model of parallel processing and a
comparison of static and dynamic load balancing schemes
are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
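The shortest-queue system treated in the entry above is straightforward to cross-check by
simulation, which is also how the paper's approximation is validated. A minimal Python sketch,
not the analytic approximation itself, estimating the mean response time of K exponential FCFS
servers fed by a Poisson stream routed to a shortest queue (all parameter values are illustrative):

    import heapq, random

    def simulate_jsq(K=8, rho=0.9, mu=1.0, n_jobs=200_000, seed=1):
        """Monte-Carlo estimate of mean response time when Poisson arrivals
        are routed to the shortest of K exponential FCFS queues."""
        rng = random.Random(seed)
        lam = rho * K * mu                     # total arrival rate
        qlen = [0] * K                         # jobs at each server, incl. in service
        arrivals = [[] for _ in range(K)]      # arrival times of jobs at each server
        departures = []                        # heap of (time, server) departure events
        t, done, total_resp = 0.0, 0, 0.0
        next_arrival = rng.expovariate(lam)
        while done < n_jobs:
            if departures and departures[0][0] <= next_arrival:
                t, q = heapq.heappop(departures)            # a job completes
                total_resp += t - arrivals[q].pop(0)
                done += 1
                qlen[q] -= 1
                if qlen[q] > 0:                             # start the next job
                    heapq.heappush(departures, (t + rng.expovariate(mu), q))
            else:
                t = next_arrival                            # a job arrives
                q = min(range(K), key=lambda i: qlen[i])    # join a shortest queue
                arrivals[q].append(t)
                qlen[q] += 1
                if qlen[q] == 1:                            # server was idle
                    heapq.heappush(departures, (t + rng.expovariate(mu), q))
                next_arrival = t + rng.expovariate(lam)
        return total_resp / done

    print(simulate_jsq())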
@Article{Raatikainen:1989:ART,
author = "K. E. E. Raatikainen",
title = "Approximating response time distributions",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "190--199",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75393",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The response time is the most visible performance
index to users of computer systems. End-users see
individual response times, not the average. Therefore
the distribution of response times is important in
performance evaluation and capacity planning studies.
However, the analytic results cannot be obtained in
practical cases. A new method is proposed to
approximate the response-time distribution. Unlike the
previous methods the proposed one takes into account
the service-time distributions and routing behaviour.
The reported results indicate that the method provides
reasonable approximations in many cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitra:1989:CND,
author = "D. Mitra and A. Weiss",
title = "A closed network with a discriminatory
processor-sharing server",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "200--208",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75394",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper gives a simple, accurate first order
asymptotic analysis of the transient and steady state
behavior of a network which is closed, not product-form
and has multiple classes. One of the two nodes of the
network is an infinite server and the discipline in the
other node is discriminatory processor-sharing.
Specifically, if there are $ n_j $ jobs of class $j$ at
the latter node, then each class $j$ job receives a
fraction $ w_j / (\sum_i w_i n_i)$ of the processor
capacity. This work has applications to data networks.
For the asymptotic regime of high loading of the
processor and high processing capacity, we derive the
explicit first order transient behavior of the means of
queue lengths. We also give explicit expressions for
the steady state mean values and a simple procedure for
finding the time constants (eigenvalues) that govern
the approach to steady state. The results are based on
an extension of Kurtz's theorem concerning the fluid
limit of Markov processes. Some numerical experiments
show that the analysis is quite accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
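A small numerical illustration of the discriminatory processor-sharing discipline in the entry
above, where each class-j job receives the fraction w_j / sum_i(w_i n_i) of the capacity; the
weights and populations below are invented:

    # Discriminatory processor sharing: with n[j] class-j jobs present and
    # weight w[j], each class-j job receives w[j] / sum_i(w[i] * n[i]) of
    # the server's capacity.  Invented weights and populations:
    w = {1: 1.0, 2: 4.0}
    n = {1: 3, 2: 2}

    denom = sum(w[i] * n[i] for i in w)                        # 1*3 + 4*2 = 11
    share_per_job = {j: w[j] / denom for j in w}               # 1/11 and 4/11
    share_per_class = {j: n[j] * share_per_job[j] for j in w}  # 3/11 and 8/11
    assert abs(sum(share_per_class.values()) - 1.0) < 1e-12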
@Article{Glew:1990:EII,
author = "Andy Glew",
title = "An empirical investigation of {OR} indexing",
journal = j-SIGMETRICS,
volume = "17",
number = "2",
pages = "41--49",
month = jan,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/378893.378896",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers OR indexing as a substitute for,
or an optimization of, addition in an addressing mode
for a high speed processor. OR indexing is evaluated in
the context of existing address streams, using time
based sampling, and through compiler modifications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
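OR indexing, as studied in the entry above, substitutes a bitwise OR for the address addition
base + offset; the substitution is exact precisely when the base and offset share no set bits,
e.g. when the base is aligned to a power of two and the offset is smaller than that alignment.
A small Python check of this property (the example addresses are invented):

    def or_indexing_ok(base, offset):
        """OR may replace ADD in address formation exactly when the
        base and offset have no set bits in common."""
        return (base & offset) == 0

    base = 0x4000                      # invented 16 KiB-aligned base address
    for offset in range(0, 0x4000, 0x1000):
        assert or_indexing_ok(base, offset)
        assert (base | offset) == base + offset   # the substitution is exact

    # A misaligned base breaks the equivalence:
    assert (0x4004 | 0x0004) != 0x4004 + 0x0004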
@Article{Gunther:1990:PP,
author = "N. J. Gunther",
title = "Performance pathways",
journal = j-SIGMETRICS,
volume = "17",
number = "2",
pages = "50--56",
month = jan,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/378893.378898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We review the status of some recent results in the
performance analysis of computer systems which are
intrinsically unstable due to the presence of more than
one stable operating state. In particular, we consider
bistable computer systems which possess two stable
states: the typical operating point and another
stable point, concomitant with degraded system
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gonzales:1990:CHL,
author = "Michael G. Gonzales",
title = "Correction of the {Halstead} length estimator skew for
small {Pascal} programs",
journal = j-SIGMETRICS,
volume = "17",
number = "2",
pages = "57--59",
month = jan,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/378893.378899",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of studies have confirmed the length
dependent skewness of Halstead's Software Science
length estimator. This paper examines the skewness for
small Pascal programs. A new model developed by
Nicholas Beser in 1983 corrects the length dependent
skew. The parameters for this model as applied to small
Pascal programs are obtained in the paper. Verification
of the correction of skewness, along with a comparison
of the variability of the two estimators, are also
examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Greenberg:1990:UPS,
author = "Albert G. Greenberg and Boris D. Lubachevsky and Isi
Mitrani",
title = "Unboundedly parallel simulations via recurrence
relations",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "1--12",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98492",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "New methods are presented for parallel simulation of
discrete event systems that, when applicable, can
usefully employ a number of processors much larger than
the number of objects in the system being simulated.
Abandoning the distributed event list approach, the
simulation problem is posed using recurrence relations.
We bring three algorithmic ideas to bear on parallel
simulation: parallel prefix computation, parallel
merging, and iterative folding. Efficient parallel
simulations are given for (in turn) the G/G/1 queue, a
variety of queueing networks having a global first come
first served structure (e.g., a series of queues with
finite buffers), acyclic networks of queues, and
networks of queues with feedbacks and cycles. In
particular, the problem of simulating the arrival and
departure times for the first $N$ jobs to a single
G/G/1 queue is solved in time proportional to $ N / P +
\log P$ using $P$ processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
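The recurrence-relation view of simulation taken in the entry above is easiest to see for the
single G/G/1 queue: with arrival times a_n and service times s_n, FCFS departure times obey
d_n = max(a_n, d_{n-1}) + s_n. The sequential Python sketch below evaluates this recurrence
directly; the paper's contribution is evaluating such recurrences in parallel (e.g. by parallel
prefix), which is not shown here:

    import random

    def gg1_departures(arrivals, services):
        """FCFS G/G/1 departure times via d[n] = max(a[n], d[n-1]) + s[n]."""
        d, last = [], 0.0
        for a, s in zip(arrivals, services):
            last = max(a, last) + s
            d.append(last)
        return d

    rng = random.Random(0)
    N = 10
    a, t = [], 0.0
    for _ in range(N):                       # Poisson arrivals, rate 0.8 (invented)
        t += rng.expovariate(0.8)
        a.append(t)
    s = [rng.expovariate(1.0) for _ in range(N)]   # exponential service, rate 1
    print(gg1_departures(a, s))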
@Article{Nelson:1990:PEG,
author = "Randolph Nelson",
title = "A performance evaluation of a general parallel
processing model",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "13--26",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98495",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we analyze a model of a parallel
processing system. In our model there is a single queue
which is served by $ K \geq 1 $ identical processors. Jobs are
assumed to consist of a sequence of barrier
synchronizations where, at each step, the number of
tasks that must be synchronized is random with a known
distribution. An exact analysis of the model is
derived. The model leads to a rich set of results
characterizing the performance of parallel processing
systems. We show that the number of jobs concurrently
in execution, as well as the number of synchronization
variables, grows linearly with the load of the system
and strongly depends on the average number of parallel
tasks found in the workload. Properties of expected
response time of such systems are extensively analyzed
and, in particular, we report on some non-obvious
response time behavior that arises as a function of the
variance of parallelism found in the workload. Based on
exact response time analysis, we propose a simple
calculation that can be used as a rule of thumb to
predict speedups. This can be viewed as a
generalization of Amdahl's law that includes queueing
effects. This generalization is reformulated when
precise workloads cannot be characterized, but rather
when only the fraction of sequential work and the
average number of parallel tasks are assumed to be
known.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
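For reference alongside the entry above, the classical Amdahl's law that the paper generalizes:
with a fraction f of sequential work, speedup on P processors is 1 / (f + (1 - f)/P). The
queueing-aware rule of thumb itself is in the paper; the sketch below only evaluates the
classical formula:

    def amdahl_speedup(f, P):
        """Classical Amdahl's law: f is the fraction of work that is sequential."""
        return 1.0 / (f + (1.0 - f) / P)

    # 10% sequential work caps speedup near 10 no matter how many processors:
    print(amdahl_speedup(0.10, 16))     # 6.4
    print(amdahl_speedup(0.10, 1024))   # about 9.9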
@Article{Wang:1990:ETD,
author = "Wen-Hann Wang and Jean-Loup Baer",
title = "Efficient trace-driven simulation method for cache
performance analysis",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "27--36",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98497",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose improvements to current trace-driven cache
simulation methods to make them faster and more
economical. We attack the large time and space demands
of cache simulation in two ways. First, we reduce the
program traces to the extent that exact performance can
still be obtained from the reduced traces. Second, we
devise an algorithm that can produce performance
results for a variety of metrics (hit ratio, write-back
counts, bus traffic) for a large number of
set-associative write-back caches in just a single
simulation run. The trace reduction and the efficient
simulation techniques are extended to parallel
multiprocessor cache simulations. Our simulation
results show that our approach substantially reduces
the disk space needed to store the program traces and
can dramatically speed up cache simulations and still
produce the exact results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
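A classical ingredient behind producing results for many cache configurations in one simulation
run, as in the entry above, is the LRU stack-distance (Mattson) algorithm: one pass over a trace
yields hit counts for every fully-associative LRU cache size at once. The Python sketch below is
that textbook algorithm, not the set-associative, write-back extension developed in the paper,
and the trace is invented:

    from collections import Counter

    def lru_stack_distances(trace):
        """One pass over a trace; a reference with stack distance d hits in
        every fully-associative LRU cache larger than d blocks."""
        stack, dists = [], Counter()
        for block in trace:
            if block in stack:
                depth = stack.index(block)   # 0 = most recently used
                dists[depth] += 1
                stack.pop(depth)
            else:
                dists['miss'] += 1           # cold miss at every size
            stack.insert(0, block)
        return dists

    trace = ['a', 'b', 'a', 'c', 'b', 'a', 'd', 'a']   # invented trace
    d = lru_stack_distances(trace)
    for size in (1, 2, 3, 4):
        hits = sum(v for k, v in d.items() if k != 'miss' and k < size)
        print(size, hits / len(trace))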
@Article{Eggers:1990:TEI,
author = "S. J. Eggers and David R. Keppel and Eric J. Koldinger
and Henry M. Levy",
title = "Techniques for efficient inline tracing on a
shared-memory multiprocessor",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "37--47",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98501",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While much current research concerns multiprocessor
design, few traces of parallel programs are available
for analyzing the effect of design trade-offs. Existing
trace collection methods have serious drawbacks:
trap-driven methods often slow down program execution
by more than 1000 times, significantly perturbing
program behavior; microcode modification is faster, but
the technique is neither general nor portable. This
paper describes a new tool, called MPTRACE, for
collecting traces of multithreaded parallel programs
executing on shared-memory multiprocessors. MPTRACE
requires no hardware or microcode modification; it
collects complete program traces; it is portable; and
it reduces execution-time dilation to less than a
factor 3. MPTRACE is based on inline tracing, in which
a program is automatically modified to produce trace
information as it executes. We show how the use of
compiler flow analysis techniques can reduce the amount
of data collected and therefore the runtime dilation of
the traced program. We also discuss problematic issues
concerning buffering and writing of trace data on a
multiprocessor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agarwal:1990:BES,
author = "Anant Agarwal and Minor Huffman",
title = "Blocking: exploiting spatial locality for trace
compaction",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "48--57",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98503",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Trace-driven simulation is a popular method of
estimating the performance of cache memories,
translation lookaside buffers, and paging schemes.
Because the cost of trace-driven simulation is directly
proportional to trace length, reducing the number of
references in the trace significantly impacts
simulation time. This paper concentrates on trace
driven simulation for cache miss rate analysis.
Previous schemes, such as cache filtering, exploited
temporal locality for compressing traces and could
yield an order of magnitude reduction in trace length.
A technique called blocking and a variant called
blocking with temporal data are presented that compress
traces by exploiting spatial locality. Experimental
results show that blocking filtering combined with
cache filtering can reduce trace length by nearly two
orders of magnitude while introducing about 10\% error
in cache miss rate estimates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:1990:BAF,
author = "Tein-Hsiang Lin and Kang G. Shin",
title = "A {Bayesian} approach to fault classification",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "58--66",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98505",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "According to their temporal behavior, faults in
computer systems are classified into permanent,
intermittent, and transient faults. Since it is
impossible to identify the type of a fault upon its
first detection, the common practice is to retry the
failed instruction one or more times and then use other
fault recovery methods, such as rollback or restart, if
the retry is not successful. To determine an
``optimal'' (in some sense) number of retries, we need
to know several fault parameters, which can be
estimated only after classifying all the faults
detected in the past. In this paper we propose a new
fault classification scheme which assigns a fault type
to each detected fault based on its detection time, the
outcome of retry, and its detection symptom. This
classification procedure utilizes the Bayesian decision
theory to sequentially update the estimation of fault
parameters whenever a detected fault is classified. An
important advantage of this classification is the early
identification of the presence of an intermittent fault so
that appropriate measures can be taken before it causes
serious damage to the system. To assess the goodness
of the proposed scheme, the probability of incorrect
classification is also analyzed and compared with
simulation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
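The Bayesian flavor of the fault classification in the entry above can be conveyed by a toy
posterior update over the three fault types; the priors and likelihoods below are invented, not
the fault parameters estimated in the paper:

    # Toy Bayes update: classify a detected fault given the observation that
    # the first retry succeeded.  Priors and likelihoods are invented.
    prior = {"transient": 0.70, "intermittent": 0.20, "permanent": 0.10}
    p_retry_ok = {"transient": 0.90, "intermittent": 0.40, "permanent": 0.01}

    unnorm = {t: prior[t] * p_retry_ok[t] for t in prior}
    evidence = sum(unnorm.values())
    posterior = {t: v / evidence for t, v in unnorm.items()}
    print(posterior)   # belief shifts strongly toward "transient"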
@Article{Moser:1990:PLA,
author = "Louise E. Moser and Vikas Kapur and P. M.
Melliar-Smith",
title = "Probabilistic language analysis of weighted voting
algorithms",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "67--73",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98507",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a method of analyzing the performance of
weighted voting algorithms in a fault-tolerant
distributed system. In many distributed systems, some
processors send messages more frequently than others
and all processors share a common communication medium,
such as an Ethernet. Typical fault-tolerant voting
algorithms require that a certain minimum number of
votes be collected from different processors. System
performance is significantly affected by the time
required to collect those votes. We formulate the
problem of weighted voting in terms of probabilistic
languages and then use the calculus of generating
functions to compute the expected delay to collect that
number of votes. An application of the method to a
particular voting algorithm, the Total protocol, is
given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1990:ERA,
author = "Peter M. Chen and Garth A. Gibson and Randy H. Katz
and David A. Patterson",
title = "An evaluation of redundant arrays of disks using an
{Amdahl 5890}",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "74--85",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98509",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently we presented several disk array architectures
designed to increase the data rate and I/O rate of
supercomputing applications, transaction processing,
and file systems [Patterson 88]. In this paper we
present a hardware performance measurement of two of
these architectures, mirroring and rotated parity. We
see how throughput for these two architectures is
affected by response time requirements, request sizes,
and read to write ratios. We find that for applications
with large accesses, such as many supercomputing
applications, a rotated parity disk array far
outperforms traditional mirroring architecture. For
applications dominated by small accesses, such as
transaction processing, mirroring architectures have
higher performance per disk than rotated parity
architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:1990:SAF,
author = "Amarnath Mukherjee and Lawrence H. Landweber and John
C. Strikwerda",
title = "Simultaneous analysis of flow and error control
strategies with congestion-dependent errors",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "86--95",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98510",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:1990:QAA,
author = "Arthur Y. M. Lin and John A. Silvester",
title = "Queueing analysis of an {ATM} switch with multichannel
transmission groups",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "96--105",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98514",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The discrete-time D[A]/D/c/B queueing system is
studied. We consider both a bulk arrival process with
constant bulk inter-arrival time ($D$) and general
bulk-size distribution ($A$) and a periodic arrival
process ($ D_1 + \cdots + D_N$). The
service/transmission times are deterministic ($D$) and
the system provides for a maximum of $c$ servers with a
buffer size $B$. The motivation for studying this
queueing system is its application in performance
modeling and analysis of an asynchronous transfer mode
(ATM) switch with multichannel transmission groups.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Johnson:1990:AAR,
author = "Theodore Johnson",
title = "Approximate analysis of reader and writer access to a
shared resource",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "106--114",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98517",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a queue that has two classes
of customers: readers and writers. Readers access the
resource concurrently and writers access the resource
serially. The queue discipline is FCFS: readers must
wait until all writers that arrived earlier have
completed service, and vice versa. The approximation
can predict both the expected waiting times for readers
and writers and the capacity of the queue. The queue
can be used for the analysis of operating system and
software resources that can be accessed both serially
and concurrently, such as shared files. We have used
the queue to analyze the performance of concurrent
B-tree algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anderson:1990:QTT,
author = "Thomas E. Anderson and Edward D. Lazowska",
title = "{Quartz}: a tool for tuning parallel program
performance",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "115--125",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98518",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Initial implementations of parallel programs typically
yield disappointing performance. Tuning to improve
performance is thus a significant part of the parallel
programming process. The effort required to tune a
parallel program, and the level of performance that
eventually is achieved, both depend heavily on the
quality of the instrumentation that is available to the
programmer. This paper describes Quartz, a new tool for
tuning parallel program performance on shared memory
multiprocessors. The philosophy underlying Quartz was
inspired by that of the sequential UNIX tool gprof: to
appropriately direct the attention of the programmer by
efficiently measuring just those factors that are most
responsible for performance and by relating these
metrics to one another and to the structure of the
program. This philosophy is even more important in the
parallel domain than in the sequential domain, because
of the dramatically greater number of possible metrics
and the dramatically increased complexity of program
structures. The principal metric of Quartz is
normalized processor time: the total processor time
spent in each section of code divided by the number of
other processors that are concurrently busy when that
section of code is being executed. Tied to the logical
structure of the program, this metric provides a
``smoking gun'' pointing towards those areas of the
program most responsible for poor performance. This
information can be acquired efficiently by
checkpointing to memory the number of busy processors
and the state of each processor, and then statistically
sampling these using a dedicated processor. In addition
to describing the design rationale, functionality, and
implementation of Quartz, the paper examines how Quartz
would be used to solve a number of performance problems
that have been reported as being frequently
encountered, and describes a case study in which Quartz
was used to significantly improve the performance of a
CAD circuit verifier.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
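The central Quartz metric described in the entry above, normalized processor time, can be
approximated by a simple post-processing pass over samples of (code section, number of busy
processors). The Python sketch below divides each sample's interval by the concurrent busy
count, a slight simplification of the "other processors" definition quoted in the abstract; all
sample values are invented:

    from collections import defaultdict

    # Invented samples: (code section, number of busy processors) taken every
    # dt seconds for one worker processor by a dedicated sampling processor.
    dt = 0.001
    samples = [("barrier_wait", 1), ("barrier_wait", 1),
               ("solve_phase", 4), ("solve_phase", 4), ("solve_phase", 3),
               ("reduce_phase", 2)]

    plain, normalized = defaultdict(float), defaultdict(float)
    for section, busy in samples:
        plain[section] += dt              # ordinary profile: raw processor time
        normalized[section] += dt / busy  # weight time spent while few are busy

    for section in plain:                 # code run while others idle stands out
        print(section, plain[section], normalized[section])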
@Article{Pattipati:1990:CVA,
author = "Krishna R. Pattipati and Joel Wolf and Somnath Deb",
title = "A calculus of variations approach to file allocation
problems in computer systems",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "126--133",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98522",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is concerned with the parameter
optimization in closed product-form queueing networks.
Our approach is to combine the techniques of the
calculus of variations with the mean value analysis
(MVA) recursion of closed queueing networks. We view
the MVA recursion as nonlinear difference equations
describing a multi-stage system, wherein a stage
corresponds to the network population, and the response
times at each node constitute the state variables of
the multi-stage system. This viewpoint leads to a
two-point boundary value problem, in which the forward
system corresponds to the MVA recursion and the
backward system corresponds to an MVA-like adjoint
recursion. The method allows for a very general class
of objective functions, and the adjoint equations
provide the necessary information to compute the
gradient of the cost function. The optimization problem
can then be solved by any of the gradient-based
methods. For the special case when the objective
function is the network delay function, the gradient
vector is shown to be related to the moments of the
queue lengths. In addition, the adjoint vector offers
the potential for the on-line adaptive control of
queueing networks based on the state information (e.g.,
actual degree of multi-programming, response times at
the devices.) The theory is illustrated via application
to the problem of determining the optimal disk routing
probabilities in a large scale, modern I/O
(Input/Output) subsystem. A subsequent paper will deal
with extensions of the theory to multi-class
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Robinson:1990:DCM,
author = "John T. Robinson and Murthy V. Devarakonda",
title = "Data cache management using frequency-based
replacement",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "134--142",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98523",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new frequency-based replacement algorithm
for managing caches used for disk blocks by a file
system, database management system, or disk control
unit, which we refer to here as data caches.
Previously, LRU replacement has usually been used for
such caches. We describe a replacement algorithm based
on the concept of maintaining reference counts in which
locality has been ``factored out''. In this algorithm
replacement choices are made using a combination of
reference frequency and block age. Simulation results
based on traces of file system and I/O activity from
actual systems show that this algorithm can offer up to
34\% performance improvement over LRU replacement,
where the improvement is expressed as the fraction of
the performance gain achieved between LRU replacement
and the theoretically optimal policy in which the
reference string must be known in advance. Furthermore,
the implementation complexity and efficiency of this
algorithm is comparable to one using LRU replacement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
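The combination of reference frequency and block age described in the entry above can be hinted
at with a toy cache that evicts, from among its least recently used blocks, the one with the
smallest reference count. This is not the published frequency-based replacement algorithm, which
additionally "factors out" locality by not counting re-references within a most-recently-used
section; the sketch only conveys the flavor:

    from collections import OrderedDict

    class ToyFrequencyAgeCache:
        """Evicts, from the least-recently-used part of the cache, the block
        with the smallest reference count.  Not the published algorithm."""
        def __init__(self, capacity, old_fraction=0.7):
            self.capacity = capacity
            self.old_count = max(1, int(capacity * old_fraction))
            self.blocks = OrderedDict()          # block -> count, in LRU order

        def access(self, block):
            if block in self.blocks:
                self.blocks[block] += 1
                self.blocks.move_to_end(block)   # most recently used at the end
                return True                      # hit
            if len(self.blocks) >= self.capacity:
                candidates = list(self.blocks)[: self.old_count]   # oldest blocks
                victim = min(candidates, key=lambda b: self.blocks[b])
                del self.blocks[victim]
            self.blocks[block] = 1
            return False                         # miss

    cache = ToyFrequencyAgeCache(capacity=3)
    for b in "a a a b c d".split():
        cache.access(b)
    # 'a' is old but frequently referenced, so 'b' is evicted instead;
    # pure LRU would have evicted 'a'.
    print(list(cache.blocks.items()))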
@Article{Dan:1990:AAL,
author = "Asit Dan and Don Towsley",
title = "An approximate analysis of the {LRU} and {FIFO} buffer
replacement schemes",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "143--152",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98525",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we develop approximate analytical
models for predicting the buffer hit probability under
the Least Recently Used (LRU) and First In First Out
(FIFO) buffer replacement policies under the
independent reference model. In the case of the
analysis of the LRU policy, the computational
complexity for estimating the buffer hit probability is
$ O(K B) $ where $B$ is the size of the buffer and $K$
denotes the number of items having distinct access
probabilities. In the case of the FIFO policy, the
solution algorithm is iterative and the computational
complexity of each iteration is $ O(K)$. Results from
these models are compared to exact results for models
originally developed by King [KING71] for small values
of the buffer size, $B$, and the total number of items
sharing the buffer, $D$. Results are also compared with
results from a simulation for large values of $B$ and
$D$. In most cases, the error is extremely small (less
than 0.1\%) for both LRU and FIFO, and a maximum error
of 3\% is observed for very small buffer size (less
than 5) when the access probabilities are extremely
skewed. To demonstrate the usefulness of the model, we
consider two applications. In our first application, we
compare the LRU and FIFO policies to an optimal static
buffer allocation policy for a database consisting of
two classes of data items. We observe that the
performance of LRU is close to that of the optimal
allocation. As the optimal allocation requires
knowledge of the access probabilities, the LRU policy
is preferred when this information is unavailable. We
also observe that the LRU policy always performs better
than the FIFO policy in our experiments. In our second
application, we show that if multiple independent
reference streams on mutually disjoint sets of data
compete for the same buffer, it is better to partition
the buffer using an optimal allocation policy than to
share a common buffer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
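The independent reference model analyzed in the entry above is easy to reproduce by simulation,
which is also how the paper checks its approximations for large B and D. The Python sketch below
is a Monte-Carlo estimate of the LRU buffer hit probability for a skewed access distribution,
not the O(KB) analytic recursion itself:

    import random
    from collections import OrderedDict

    def lru_hit_probability(probs, buffer_size, n_refs=200_000, seed=0):
        """Monte-Carlo LRU hit ratio under the independent reference model:
        each reference picks item i with probability probs[i]."""
        rng = random.Random(seed)
        items = list(range(len(probs)))
        buffer, hits = OrderedDict(), 0
        for _ in range(n_refs):
            i = rng.choices(items, weights=probs, k=1)[0]
            if i in buffer:
                hits += 1
                buffer.move_to_end(i)
            else:
                if len(buffer) >= buffer_size:
                    buffer.popitem(last=False)    # evict least recently used
                buffer[i] = None
        return hits / n_refs

    # Two classes of items with skewed access probabilities (invented)
    probs = [0.08] * 10 + [0.002] * 100           # sums to 1.0
    print(lru_hit_probability(probs, buffer_size=20))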
@Article{Alonso:1990:AFW,
author = "Raphael Alonso and Andrew W. Appel",
title = "An advisor for flexible working sets",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "153--162",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98753",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The traditional model of virtual memory working sets
does not account for programs that can adjust their
working sets on demand. Examples of such programs are
garbage-collected systems and databases with block
cache buffers. We present a memory-use model of such
systems, and propose a method that may be used by
virtual memory managers to advise programs on how to
adjust their working sets. Our method tries to minimize
memory contention and ensure better overall system
response time. We have implemented a memory ``advice
server'' that runs as a non-privileged process under
Berkeley Unix. User processes may ask this server for
advice about working set sizes, so as to take maximum
advantage of memory resources. Our implementation is
quite simple, and has negligible overhead, and
experimental results show that it results in sizable
performance improvements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Torrellas:1990:ACA,
author = "Joseph Torrellas and John Hennessy and Thierry Weil",
title = "Analysis of critical architectural and programming
parameters in a hierarchical shared-memory multiprocessor",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "163--172",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98754",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalable shared-memory multiprocessors are the subject
of much current research, but little is known about the
performance behavior of these machines. This paper
studies the performance effects of two machine
characteristics and two program characteristics that
seem to be major factors in determining the performance
of a hierarchical shared-memory machine. We develop an
analytical model of the traffic in a machine loosely
based on Stanford's DASH multiprocessor and use program
parameters extracted from multiprocessor traces to
study its performance. It is shown that both locality
in the data reference stream and the amount of data
sharing in a program have an important impact on
performance. Although less obvious, the bandwidth
within each cluster in the hierarchy also has a
significant performance effect. Optimizations that
improve the intracluster cache coherence protocol or
increase the bandwidth within a cluster can be quite
effective.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jog:1990:PEC,
author = "Rajeev Jog and Philip L. Vitale and James R.
Callister",
title = "Performance evaluation of a commercial cache-coherent
shared memory multiprocessor",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "173--182",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98756",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an approximate Mean Value
Analysis (MVA) model developed to project the
performance of a small-scale shared-memory commercial
symmetric multiprocessor system. The system, based on
Hewlett Packard Precision Architecture processors,
supports multiple active user processes and multiple
execution threads within the operating system. Using
detailed timing for hardware delays, a customized
approximate closed queueing model is developed for the
multiprocessor system. The model evaluates delays due
to bus and memory contention, and cache interference.
It predicts bus bandwidth requirements and utilizations
for the bus and memory controllers. An extension to
handle I/O traffic is outlined. Applications are
profiled on the basis of execution traces on
uniprocessor systems to provide input parameters for
the model. Performance effects of various detailed
architectural tradeoffs (memory interleaving, lower
memory latencies) are examined. The sensitivity of
overall system performance to various parameters is
explored. Preliminary measurements of uniprocessor
systems are compared against the model predictions. A
prototype multiprocessor system is under development.
We intend to validate the modeling results against
measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:1990:PAC,
author = "Erol Gelenbe",
title = "Performance analysis of the {Connection Machine}",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "183--191",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98757",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an analysis of the performance of
the Connection Machine, with special emphasis on
estimating the effect of its interprocessor
communication architecture. A queueing model of the
network architecture, including the NEWS and ROUTER
networks, is used to compute the slow-down induced by
message exchange between processors. Locality of the
message exchanges is modelled by message sending
probabilities which depend on whether a message is sent
by a processor to another processor placed on the same
NEWS network, or on the same ROUTER, or at a ``remote''
location which is only accessible via the ROUTER
network. The specific slotted TDMA structure of the
ROUTER Network communications is taken into account.
The performance degradation of the Connection Machine
as a function of the communication and architecture
parameters is derived.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Willick:1990:AMM,
author = "Darryl L. Willick and D. L. Eager",
title = "An analytic model of multistage interconnection
networks",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "192--202",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98758",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multiprocessors require an interconnection network to
connect processors with memory modules. The performance
of the interconnection network can have a large effect
upon overall system performance, and, therefore,
methods are needed to model and compare alternative
network architectures. This paper is concerned with
evaluating the performance of multistage
interconnection networks consisting of $ k \times s $
switching elements. Examples of such networks include
omega, binary $n$-cube and baseline networks. We
consider clocked, packet switched networks with buffers
at switch output ports. An analytical model based on
approximate Mean Value Analysis is developed, then
validated through simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
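The entry above builds an approximate Mean Value Analysis model; for orientation, the classical
exact single-class MVA recursion on which such approximations rest is short enough to state in
full. The Python sketch below implements that textbook recursion for a closed product-form
network with invented service demands, not the interconnection-network model of the paper:

    def exact_mva(service_demands, n_customers):
        """Exact Mean Value Analysis for a closed, single-class, product-form
        network of queueing stations; service_demands[k] = visit ratio times
        mean service time at station k."""
        K = len(service_demands)
        q = [0.0] * K                      # mean queue lengths at population n-1
        for n in range(1, n_customers + 1):
            r = [service_demands[k] * (1.0 + q[k]) for k in range(K)]  # residence times
            x = n / sum(r)                 # throughput at population n
            q = [x * r[k] for k in range(K)]
        return x, r                        # throughput, per-station residence times

    # Invented demands: a CPU plus two disks, 10 customers
    throughput, residence = exact_mva([0.05, 0.08, 0.04], 10)
    print(throughput, sum(residence))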
@Article{Dussa:1990:DPT,
author = "K. Dussa and B. Carlson and L. Dowdy and K.-H. Park",
title = "Dynamic partitioning in a transputer environment",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "203--213",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98759",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel programs are characterized by their speedup
behavior. As more processors are allocated to a
particular parallel program, the program (potentially)
executes faster. However, there is often a point of
diminishing returns, beyond which extra allocated
processors cannot be used effectively. Extra processors
would be better utilized by allocating them to another
program. Thus, given a set of processors in a
multiprocessor system, and a set of parallel programs,
a partitioning problem naturally arises which seeks to
allocate processors to programs optimally. The problem
addressed in this paper is dynamic partitioning. When
the number of executable parallel programs changes, the
optimal partition sizes also change. To realize the new
partition settings, a dynamic repartitioning of all
processors is triggered. When extra processors suddenly
become available to a running program due to a program
departure, or when processors suddenly are taken away
from a running program due to a program arrival, a
nontrivial repartitioning overhead occurs. Depending
upon the specific environment, this overhead cost may
negate any potential repartitioning benefit. To gain
insight into this dynamic partitioning problem, a
specific system, a specific workload, and a specific
analytical model are studied. The specific system is an
INMOS transputer system consisting of an HP Vectra
front-end, an INMOS B004 evaluation board with a single
T414 transputer, and an EB8-10 board with eight T800
transputers. The specific workload consists of parallel
versions of a classical N-body problem and a classical
search problem. The specific analytical model is a
Markov model which is parameterized using the concept
of program execution signatures. The sensitivity
analysis experiments both validate the model and
indicate the characteristics of those workloads which
benefit from dynamic partitioning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zahorjan:1990:PSS,
author = "John Zahorjan and Cathy McCann",
title = "Processor scheduling in shared memory
multiprocessors",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "214--225",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98760",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Existing work indicates that the commonly used
``single queue of runnable tasks'' approach to
scheduling shared memory multiprocessors can perform
very poorly in a multiprogrammed parallel processing
environment. A more promising approach is the class of
``two-level schedulers'' in which the operating system
deals solely with allocating processors to jobs while
the individual jobs themselves perform task dispatching
on those processors. In this paper we compare two basic
varieties of two-level schedulers. Those of the first
type, static, make a single decision per job regarding
the number of processors to allocate to it. Once the
job has received its allocation, it is guaranteed to
have exactly that number of processors available to it
whenever it is active. The other class of two-level
scheduler, dynamic, allows each job to acquire and
release processors during its execution. By responding
to the varying parallelism of the jobs, the dynamic
scheduler promises higher processor utilizations at the
cost of potentially greater scheduling overhead and
more complicated application level task control
policies. Our results, obtained via simulation,
highlight the tradeoffs between the static and dynamic
approaches. We investigate how the choice of policy is
affected by the cost of switching a processor from one
job to another. We show that for a wide range of
plausible overhead values, dynamic scheduling is
superior to static scheduling. Within the class of
static schedulers, we show that, in most cases, a
simple ``run to completion'' scheme is preferable to a
round-robin approach. Finally, we investigate different
techniques for tuning the allocation decisions required
by the dynamic policies and quantify their effects on
performance. We believe our results are directly
applicable to many existing shared memory parallel
computers, which for the most part currently employ a
simple ``single queue of tasks'' extension of basic
sequential machine schedulers. We plan to validate our
results in future work through implementation and
experimentation on such a system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Leutenegger:1990:PMM,
author = "Scott T. Leutenegger and Mary K. Vernon",
title = "The performance of multiprogrammed multiprocessor
scheduling algorithms",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "226--236",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98761",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling policies for general purpose
multiprogrammed multiprocessors are not well
understood. This paper examines various policies to
determine which properties of a scheduling policy are
the most significant determinants of performance. We
compare a more comprehensive set of policies than
previous work, including one important scheduling
policy that has not previously been examined. We also
compare the policies under workloads that we feel are
more realistic than previous studies have used. Using
these new workloads, we arrive at different conclusions
than reported in earlier work. In particular, we find
that the ``smallest number of processes first'' (SNPF)
scheduling discipline performs poorly, even when the
number of processes in a job is positively correlated
with the total service demand of the job. We also find
that policies that allocate an equal fraction of the
processing power to each job in the system perform
better, on the whole, than policies that allocate
processing power unequally. Finally, we find that for
lock access synchronization, dividing processing power
equally among all jobs in the system is a more
effective property of a scheduling policy than the
property of minimizing synchronization spin-waiting,
unless demand for synchronization is extremely high.
(The latter property is implemented by coscheduling
processes within a job, or by using a thread management
package that avoids preemption of processes that hold
spinlocks.) Our studies are done by simulating abstract
models of the system and the workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dawkins:1990:ESM,
author = "W. P. Dawkins and V. Debbad and J. R. Jump and J. B.
Sinclair",
title = "Efficient simulation of multiprogramming",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "237--238",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98762",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shenker:1990:MFC,
author = "Scott Shenker",
title = "Making flow control work in networks: a
control-theoretic analysis of gateway service
disciplines",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "239--240",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98763",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shenker:1990:MGW,
author = "Scott Shenker",
title = "Making greed work in networks: a game-theoretic
analysis of gateway service disciplines",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "241--242",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98764",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghandeharizadeh:1990:FAP,
author = "Shahram Ghandeharizadeh and David J. DeWitt",
title = "Factors affecting the performance of multiuser
database management systems",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "243--244",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98765",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While in the past 20 years database management systems
(DBMS) have become a critical component of almost all
organizations, their behavior in a multiuser
environment has surprisingly not been studied
carefully. In order to help us understand the multiuser
performance of the multiprocessor Gamma database
machine [DEWI90], we began by studying the performance
of a single processor version of this system. In this
paper, we describe some of the factors that affect the
performance of DBMS in a multiuser environment. We
refer the interested reader to [GHAN90] for more
details. For these experiments, the Gamma software was
run on a VAX 11/750 with 2 megabytes of memory and a
330 megabyte Fujitsu disk drive. An 8 Kbyte disk page
was used and the buffer pool was set at 61 pages. A
second processor was used to simulate users submitting
queries. In a DBMS, queries can be classified according
to their pattern of disk accesses. Those that either
sequentially scan all the pages of a relation or use a
clustered index to retrieve only those pages containing
tuples that satisfy a selection predicate, access the
disk sequentially. Queries that use a non-clustered
index to process a query tend to access disk pages
randomly. For those queries that access the disk
sequentially, it is very important to avoid random disk
accesses in presence of multiple, concurrently
executing queries. Consider a query that selects 1
tuple from a 12,500 tuple relation (each tuple is 208
bytes long) by sequentially scanning the relation. As
shown in Figure 1, as the multiprogramming level (MPL)
is increased from 1 to 2, the throughput of the system
actually decreases. In the case of a high degree of
data sharing, the two concurrently executing queries
will generally access the same relation (out of a set
of 10 identical relations). However, this does not
necessarily mean that these queries are sufficiently
synchronized to share pages in the buffer pool. The
result is that the disk ends up performing a series of
random disk requests instead of a series of sequential
disk requests had each query been submitted
consecutively. The random disk requests result in a
higher average seek time. As shown in Figure 1, the
drop in throughput is largest for the low degree of
data sharing as the two concurrently executing queries
may access any relation in the database. Thus, on the
average the head of the disk must travel a longer
distance on each disk access and since the average seek
time increases as a function of the square root of the
distance traveled by the head of the disk, the average
service time of the disk is higher. To further
illustrate the complex behavior that a database system
can exhibit, consider a range selection query that uses
a non-clustered index to select 15 tuples out of a
12,500 tuple relation. Since with a non-clustered index
the order of index records is not the same as the order
of the tuples in the indexed relation, each tuple
retrieved results in a random disk I/O. As shown in
Figure 2, the throughput of the system is highest for
the high degree of data sharing because when a query
commits and its corresponding terminal submits a new
query, the new query will generally access the same
relation as the previous query. The result is that the
required index pages will generally be resident in the
buffer pool. On the other hand, the probability that
the newly submitted query will access the same relation
as the previous query is much lower with the low and
medium degrees of data sharing than with the high
degree of data sharing. Furthermore, since each query
processes a large number of pages, the execution of one
query tends to flush the buffer pool of pages from some
previously accessed relation resulting in a very low
percentage of buffer pool hits for subsequent queries
as illustrated in Figure 3. For each of the degrees of
data sharing, the throughput of the system increases
from a multiprogramming level (MPL) of one to twelve.
But observe from Figure 4 that the disk becomes 100\%
utilized at an MPL of four. The throughput continues to
increase from an MPL of 4 to 12 because the disk
controller utilizes an elevator
algorithm when more than two disk requests are pending
and consequently enforces some locality of reference on
the random disk accesses. The result is that the
average seek time decreases. At MPLs higher than
twelve, the throughput of the system begins to decrease
for each of the degrees of data sharing due to the
decrease in percentage of buffer pool hits (see Figure
3). Recall that all the disk requests made by this
query type are random and that the buffer pool utilizes
an LRU replacement policy for all the pages (index +
data). At MPLs higher than twelve, the data pages begin
to compete with index pages for the buffer pool
resulting in a decrease in the percentage of buffer
pool hits. In addition, this increases the load on the
disk and reduces the load on the CPU, resulting in a
drop in CPU utilization as shown in Figure 5. Other factors that
affect the performance of a DBMS include the use of a
software read-ahead mechanism and the availability of a
hardware disk cache. We have observed up to a 30\%
improvement in throughput with a software read-ahead
mechanism. Its benefits, however, diminish when the
disk becomes 100\% utilized. While a track-size
hardware disk cache is extremely beneficial for
sequential scan queries executing by themselves, such a
mechanism provides only very marginal benefits in a
multiuser environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
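
The seek-time argument in the abstract above (average seek time grows
roughly as the square root of head travel, so interleaved scans of
far-apart relations suffer most) can be illustrated with a small
sketch. The Python fragment below is only an illustration of that
relationship; the disk size, seek coefficient, and relation layouts are
assumptions, not the Gamma/Fujitsu configuration measured in the paper.

# Back-of-the-envelope sketch of interleaved-scan seek inflation.
# Seek time is assumed to grow as sqrt(head travel); constants are
# illustrative, not the paper's measured disk parameters.
import math

SEEK_COEFF = 0.5          # ms per sqrt(cylinder) of travel (assumed)

def seek_time(distance):
    return SEEK_COEFF * math.sqrt(distance)

def avg_seek(positions):
    """Average seek time over a sequence of cylinder positions."""
    hops = list(zip(positions, positions[1:]))
    return sum(seek_time(abs(b - a)) for a, b in hops) / len(hops)

# One query scanning a relation stored on cylinders 0..99 sequentially.
single = list(range(100))

# Two unsynchronized scans of the same relation (high data sharing):
# one scan is 50 cylinders ahead of the other, so the head ping-pongs.
shared = [c for pair in zip(range(50, 100), range(0, 50)) for c in pair]

# Two scans of relations far apart on the disk (low data sharing).
far = [c for pair in zip(range(0, 50), range(900, 950)) for c in pair]

print("single sequential scan  :", round(avg_seek(single), 2), "ms/seek")
print("two scans, same relation:", round(avg_seek(shared), 2), "ms/seek")
print("two scans, far apart    :", round(avg_seek(far), 2), "ms/seek")
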
@Article{Englert:1990:BNS,
author = "Susanne Englert and Jim Gray and Terrye Kocher and
Praful Shah",
title = "A benchmark of {NonStop SQL release 2} demonstrating
near-linear speedup and scaleup on large databases",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "245--246",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98766",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Somani:1990:PMR,
author = "Arun K. Somani and James A. Ritcey and Stephen H. L.
Au",
title = "Phased mission reliability analysis",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "247--248",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98768",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitchell:1990:PAF,
author = "Lionel C. Mitchell",
title = "Performance analysis of a fault tolerant computer
system",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "249--250",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98769",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents the description of an analytical
queueing network model of a Tandem computer system in
the FAA Remote Maintenance Monitoring environment and a
performance analysis of the Maintenance Processor
Subsystem for the 1990s time frame. The approach was to
use measurement data to quantify application service
demands and performance contributions of the
fault-tolerant software in the Tandem environment in an
analytical queueing network model. Sensitivity analyses
were conducted using the model to examine alternative
configurations, workload growth, and system overhead
among others. The model framework and performance
analysis methodology can be used for capacity planning
purposes during the operational phase of the system.
The Federal Aviation Administration (FAA) is
responsible for the many critical functions of the
National Airspace System (NAS). Many of these functions
have very high availability requirements. One such
function is Remote Maintenance Monitoring (RMM). The
FAA has implemented prototype versions of portions of
this system on the Tandem fault tolerant computer
architecture. The Maintenance Processor Subsystem (MPS)
implements monitor/control and management information
functions within FAA's Remote Maintenance Monitoring
System (RMMS). MPSs are located at 23 Air Route Traffic
Control Centers (ARTCC) and various other FAA sites.
These computers remotely monitor and control sensors.
The RMMS components are in various stages of
development. The MPS currently consists of a
multi-processor Tandem configuration with initial
versions of the monitor/control and management
information software. Only a small number of remote
sensors are currently monitored via point-to-point
communication links. The performance evaluation of the
FAA's MPS involved the following steps: assess the
functional and performance requirements; develop and
validate a baseline model of the MPS prototype Tandem
system; modify the baseline model to represent future
MPS configuration and transaction requirements; and
evaluate predicted performance. The functional and
performance requirements of the MPS were determined
primarily from FAA documentation and personnel.
Performance data from a prototype MPS site at the
Memphis ARTCC, collected by the Tandem XRAY monitor,
were used to quantify model priority, service demand
and workload intensity parameters, and to validate the
baseline model using response time and utilization
metrics. Configuration specification on the Memphis
node was also collected for use in the model. The
model was developed using the CTA queueing network
package Performance Analysis Tool Box (PATB). The model
of the Tandem computer represents the non-stop
processing operation implemented by Tandem's
Transaction Monitoring Facility (TMF) and the mirrored
disk writing operation. In addition, the model
represents the GUARDIAN operating system priority
scheduler, CPU burst size, interrupt processing, and
memory swapping. The basic modeling approach was to use
measurement data to represent the complex fault
tolerant activities in an analytical queueing network
model. A model of the Memphis MPS node was developed to
serve as a baseline for examining the performance of
future ARTCC MPS configurations. The model was
developed using the PATB queueing network tool which
implements a Linearizer mean value analysis algorithm.
The MPS functional and performance requirements and the
XRAY measurement data were used to define the software,
communication, and workload characteristics of the
model. The XRAY measurement data and configuration
information on the Memphis MPS node and Tandem
information were used to define the hardware and system
software characteristics and to quantify the processing
and I/O service demands for the application and system
software. The basic components of the PATB model are:
CPU, disk, and communication link hardware components;
the application and system software program elements
including the fault tolerant functions; and the
application and overhead workload, or transaction,
flows. The local terminals were implicitly represented
as the source of the transactions. The Remote
Monitoring Subsystem (RMS) sensor devices were
represented as transaction sources and sinks. The
interprocessor bus, the device controllers and the I/O
bus were not included in the model. Their contribution
to performance was judged to be insignificant based on
examination of measurement data. The fault-tolerant
check-point functionality of Tandem's Transaction
Monitoring Facility was represented by including the
TMF processing and I/O activities as serial delays on
the transaction flows for application workloads. The
mirrored disk writing was reflected in the I/O service
demand data from XRAY and did not require any further
model representation. Memory contention was modeled in
a separate PATB model. Both models assume a normal
operational scenario (i.e., failure modes are not
modeled). The baseline performance model was validated
using the XRAY data from the Memphis MPS site. The
primary performance metric used in the model validation
was average terminal response time. Model response time
was within 15 percent of measured response time. One
parameter examined in the validation exercise was CPU
burst size. Using average burst size instead of the
operating system maximum provided better agreement of
model results with measured results. The MPS baseline
model was modified to represent different possible MPS
configurations for the 1990s. The changes in the model
reflected additional and faster CPU, disk and
communication servers and modification of software CPU
residency and workload flows. Various alternatives were
examined for hardware and software configuration,
number of sensor devices monitored, terminal
transaction load, and system overhead and application
software service demands. In addition to the detailed
model of the application and system software a
flow-equivalent queueing network model was developed,
using PATB, to examine the impact of memory queueing
for the proposed configuration. The model was developed
to examine the impact of: the operating system policy
of ``cloning'' processes subject to queue length
threshold; additional application software functions
not yet implemented; uncertainty of expected
transaction rate; and additional system software
storage requirements. The results of the analysis are
being used by the FAA to define the MPS performance
requirements for the 1995 time frame. The MPS model may
be used in the future for capacity planning and
performance optimization exercises for different MPS
field configurations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jensen:1990:RTD,
author = "David W. Jensen and Daniel A. Reed",
title = "Ray tracing on distributed memory parallel systems",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "251--252",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98770",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Among the many techniques in computer graphics, ray
tracing is prized because it can render realistic
images, albeit at great computational expense. In this
note we explore the performance of several approaches
to ray tracing on a distributed memory parallel system.
A set of performance instrumentation tools and their
associated visualization software are used to identify
the underlying causes of performance differences.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mirchandani:1990:CME,
author = "Dinesh Mirchandani and Prabuddha Biswas",
title = "Characterizing and modeling {Ethernet} performance of
distributed {DECwindows} applications",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "253--254",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98771",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McGehearty:1990:COPa,
author = "Patrick F. McGehearty",
title = "Challenges in obtaining peak parallel performance with
a {Convex C240}, a parallel vector processor",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "255--256",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98773",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This report examines the behavior of the Linpack $ 300
\times 300 $ benchmark [Dongarra] on a parallel vector
machine. It is observed that the performance of several
parallel vector machines on this application is far
below their nominal peak performance. Dissection of the
internals of the algorithms shows how peak performance
is limited. The insights gained provide guidance to
algorithm developers as to ways to make maximum use of
architectural strengths. System architects may gain
insight about which system characteristics to optimize
to increase the performance of future designs for this
class of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Heimlich:1990:TCN,
author = "Steven A. Heimlich",
title = "Traffic characterization of the {NSFNET} national
backbone",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "257--258",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98774",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditionally, models of packet arrival in
communication networks have assumed either Poisson or
compound Poisson arrival patterns. A study of a token
ring local area network (LAN) at MIT [5] found that
packet arrival followed neither of these models.
Instead, traffic followed a more general model dubbed
the ``packet train,'' which describes network traffic
as a collection of packet streams traveling between
pairs of nodes. A packet train consists of a number of
packets traveling between a particular node pair. This
study examines the existence of packet trains on
NSFNET, a high speed national backbone network. Train
characteristics on NSFNET are not as striking as those
found on the MIT local network; however, certain
protocols exhibit quite strong train behavior given the
great number of hosts communicating through the
backbone. Descriptions of the packet train model can be
found in [3] and [5].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
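
For readers unfamiliar with the packet-train model cited above, the
sketch below groups a timestamped packet sequence into per-node-pair
trains whenever the gap between consecutive packets stays under a
threshold. The gap value and the sample trace are assumptions for
illustration, not NSFNET data or the exact definition used in [3] and [5].

# Minimal packet-train grouping sketch (illustrative threshold and trace).
from collections import defaultdict

MAX_GAP = 0.5   # seconds between "cars" before a new train starts (assumed)

def trains(packets):
    """packets: iterable of (timestamp, src, dst); returns train lengths per pair."""
    last_seen = {}
    result = defaultdict(list)          # (src, dst) -> list of train lengths
    for ts, src, dst in sorted(packets):
        pair = (src, dst)
        if pair in last_seen and ts - last_seen[pair] <= MAX_GAP:
            result[pair][-1] += 1       # same train, one more car
        else:
            result[pair].append(1)      # start a new train
        last_seen[pair] = ts
    return dict(result)

trace = [(0.00, "A", "B"), (0.05, "A", "B"), (0.09, "A", "B"),
         (0.10, "C", "D"), (2.00, "A", "B"), (2.02, "A", "B")]
print(trains(trace))   # {('A', 'B'): [3, 2], ('C', 'D'): [1]}
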
@Article{Davidson:1990:EEA,
author = "Jack W. Davidson and David B. Whalley",
title = "{Ease}: an environment for architecture study and
experimentation",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "259--260",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98775",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gathering detailed measurements of the execution
behavior of an instruction set architecture is
difficult. There are two major problems that must be
solved. First, for meaningful measurements to be
obtained, programs that represent typical work load and
instruction mixes must be used. This means that
high-level language compilers for the target
architecture are required. This problem is further
compounded as most architectures require an optimizing
compiler to exploit their capabilities. Building such a
compiler can be a formidable task. The second problem
is that gathering detailed dynamic measurements of an
architecture using typical user programs reading
typical data sets can consume significant computation
resources. For example, a popular way to gather
execution measurements is to simulate the architecture.
This technique is often used when the architecture in
question does not yet exist, or is not yet stable and
available for production use. Depending on the level of
the simulation, programs can run 100 to 500 times
slower than directly-executed code [HUGU87]. Tracing is
another alternative one can use if the architecture
being measured exists, is accessible, and tracing is
possible on that machine. Tracing can be even slower
than simulation [HUGU87]. Because of the large
performance penalties with these methods, the tendency
is to use small programs with small data sets. The
relevance of measures collected this way is always
subject to question. This paper describes an
environment called ease (Environment for Architecture
Study and Experimentation) that solves both these
problems. It consists of an easily retargetable
optimizing compiler that produces production-quality
code. The compiler also supports the generation of
instrumented code that gathers very fine-grained
execution statistics with little overhead. Typically,
instrumented code runs 10 to 15 percent slower than
code that is not instrumented. Similarly, because
information about instructions is collected as a side
effect of the compiler generating code, compilation
time is only increased by 15 to 20 percent. The
combination of an easily retargetable compiler and an
efficient method of observing the run-time behavior of
real programs provides an environment that is useful in
a number of contexts. ease logically consists of two
parts: the set of tools for building optimizing
compilers quickly and the tools that produce and
analyze the measurements of the execution behavior of
the instruction set architecture. The compiler
technology is known as vpo [BENI88, DAVI84, DAVI86]. An
efficient way to collect measurements for subsequent
analysis is to modify the back end of the compiler to
store the characteristics of the instructions to be
executed and to produce code that will count the number
of times that each instruction is executed. These
modifications have been implemented in vpo and are
shown in Figure 1. The first modification necessary to
collect measurements is to have vpo save the
characteristics of the instructions that will be
executed. During code selection, information about the
characteristics of the instructions is gathered and
used for semantic checks. The semantic checks are
extended to store these characteristics with the
instruction by invoking a machine-independent routine.
After all optimizations have been completed, the
information about each instruction is then written to a
file for subsequent processing. The second modification
is to have vpo generate code to count the number of
times each instruction is executed. Again this is
accomplished after all optimizations have been
performed. Within each function there are groups of
instructions, basic blocks, that are always executed
the same number of times. There are also groups or
classes of basic blocks that are executed the same
number of times and these are denoted as execution
classes. Using the dataflow information collected by
the optimizer, the execution classes are determined and
code to count the number of times that each execution
class is executed is inserted at the beginning of the
first basic block in the execution class. At the end of
the execution of the program, the number of times that
each execution class is executed is written to a file.
The execution counts and the characteristics of the
instructions can then both be used to produce dynamic
measurements. The characteristics of the instructions
can also be used to produce static measurements. ease
has been ported to ten different machines to compare
current architectures. Measurements from the execution
of a test set of nineteen C programs were obtained for
each of the architectures. The detail and accuracy of
the reports produced by ease allowed insights to be
drawn when analyzing the measurements. The measurements
collected include: instruction path length, instruction
path size, instruction type distribution, addressing mode
distribution, memory reference size distribution, memory
reference address distribution, register usage, condition
code usage, conditional branches taken, average number of
instructions between branches, and data type distribution. The
measurements are sufficiently detailed to determine the
number of times each combination of addressing mode and
data type is used for each field of each type of
instruction. Results comparing the ten architectures
analyzed appear in [WHAL89]. In addition to using ease
to evaluate and analyze existing instruction set
architectures, it can be used to help design new
machines [DAVI89b]. In this case, vpo emits code for an
existing host machine that emulates the instruction set
of the machine being designed. vpo's organization
permits this to be done quickly and easily as follows.
The last step in the compilation process is the
conversion of a machine-independent representation of
an instruction to assembly language for the target
machine and its emission to a file that will be
processed by the system's assembler. In order to
evaluate an architecture that does not exist, rather
than emit assembly code for the target machine,
assembly code for an existing architecture is emitted.
Information about the effects of the instruction is
emitted as if the target architecture existed. ease has
also been used to analyze different code generation
strategies. For instance, by recompiling the source
files from the C run-time library, different calling
sequence conventions have been investigated [DAVI89a].
By extracting measurements of the behavior of the code,
the effect of any change can be easily observed. This
environment for the collection of architectural
measurements has been designed to require little effort
when retargeting for a new architecture. Since the code
selector and other optimizations are constructed
automatically, a vpo-based compiler is easy to
retarget. Because the optimizer stores information
about instructions using a machine-independent
representation, it is easy to produce assembly code for
both existing and proposed architectures and to store
instruction information for the collection of
measurements. Most of the code to perform the
extraction of measurements is also machine-independent.
A vpo-based C compiler for ten different machines was
modified to collect measurements as specified above.
For each machine, it typically took three to four hours
to make the necessary machine-dependent modifications
to the compiler. The ease environment has been shown to
be an efficient tool for architectural evaluation and
design. Since accurate and detailed reports can be
produced for a variety of measurements, the impact of
each modification to the compiler or architecture can
easily be determined. This allows one to use an
iterative design method for evaluation of performance
in a quantitative manner.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
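
The abstract's scheme of combining statically recorded instruction
characteristics with per-execution-class run-time counters can be
paraphrased in a few lines. The sketch below is a simplified Python
illustration under assumed data (the class layout, instruction types,
and counts are invented); it is not vpo or ease code.

# Sketch: dynamic instruction-type mix = static per-class characteristics
# weighted by per-execution-class counters (all data below is made up).
from collections import Counter

# Static information emitted by the compiler: for each execution class,
# the characteristics of the instructions it contains (here just a type).
static_info = {
    "class0": ["load", "add", "store", "branch"],      # loop body
    "class1": ["load", "call"],                        # function prologue
}

# Run-time counters: how many times each execution class executed.
exec_counts = {"class0": 1000, "class1": 1}

def dynamic_mix(static_info, exec_counts):
    """Dynamic instruction-type distribution from static mix x execution counts."""
    mix = Counter()
    for cls, instructions in static_info.items():
        for itype in instructions:
            mix[itype] += exec_counts[cls]
    total = sum(mix.values())
    return {itype: count / total for itype, count in mix.items()}

print(dynamic_mix(static_info, exec_counts))
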
@Article{Waclawsky:1990:DQB,
author = "John G. Waclawsky and Ashok K. Agrawala",
title = "Dynamic queue behavior in networks with window
protocols",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "261--262",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98777",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we employ a deterministic analysis
technique to characterize the dynamic queueing aspects
of window protocols. The deterministic behavior of
these protocols and the deterministic influence of the
resources along the physical path are explicitly
considered in the evaluation of path queue behavior.
Transient and steady state queue behavior of fixed and
sliding window protocols are investigated. We discover
the existence of significant nonlinearities in the
dynamics of queue activity. Window protocols are viewed
as logical simplex pipes. These pipes connect a sender
and a receiver through a series of heterogeneous
physical resources which provide a path of finite delay
between them. Links and nodes make up the path
resources which supply physical connectivity. The
resource with the largest delay is called the
bottleneck resource. Dynamic queue behavior is obtained
by explicitly considering the fact that feedback
mechanisms employed by window protocols make them
inherently cyclic. Thus a group of packets, called a
window, enters the network every cycle. The concept of
a window can be formalized in terms of containers which
are made available to carry packets through a path.
Packets cannot be transmitted without a container.
Controlling the number of containers available at the
protocol sender controls the amount of data flowing in
the path. Packet transmission by the sender, using the
first link, can occur when either the link changes its
status from busy to free or an acknowledgement is
received. The sender is considered ``greedy'' since
fundamental sender operation is to transmit as long as
both packets and containers are available.
Deterministic behavior occurs whenever the arrival rate
of packets to the sender is such that there is always a
packet available for transmission. This situation
occurs frequently in networks for all types of traffic.
In fact, the whole class of ``Batch'' traffic satisfies
this arrival situation because of the rapid generation
of packets by batch applications. The following
assumptions were employed for this analysis. The path
is initially empty. Packets are always available for
transmission by the sender. Thus data flow only stops
when the sender exhausts its container supply. All
packets (including those containing a request or
acknowledgement) are the same size. No cross traffic is
present. There is no loss or reordering of packets. All
resources follow a work conserving discipline. We
define that departures from one resource occur at the
same time instant as arrivals to the next resource.
Fundamental packet and resource activity shows that the
bottleneck exerts a major influence on path behavior.
This is seen for two reasons. First, when load is
heavy, packets depart from the path under control of
the bottleneck. Thus, the bottleneck controls path
throughput. Second, if a packet is delayed anywhere
along the path it also waits at the bottleneck. Thus,
the bottleneck controls the timing of window protocol
acknowledgements and all resource utilizations. The
queue formation process is seen as a by-product of the
heterogeneous delays that exist along a path. Whenever
a higher speed resource exists at the sender, then
queue sizes increase normally at slower resources along
a path during any period of continuous sender
transmission. Clearly, if path resource delays are
equal along a path or a slower resource exists
``upstream'', then no queue buildup can occur
``downstream'' from the slower or equal speed resource.
Thus, queue build up along a path only occurs at, or
prior to, the bottleneck location. Once the path is
full, whenever both the bottleneck and the protocol
sender are transmitting, then packet build up along the
path occurs at the same rate that containers are
consumed at the sender. Since the arrival rate of
packets to any queue is limited by the slowest upstream
resource in the path, we only examine paths with
increasing resource delays. Paths without these exact
characteristics do make up a substantial portion of
many actual network environments. Queues within these
paths can be analyzed by looking further upstream for
an appropriate arrival rate. This is done by shifting
packet arrival times through the use of a constant for
each queue. Results show that window protocol activity,
along with physical path delays and the value of the
window size, controls both the magnitude of queue sizes
and their rate of change. In addition the cyclic
behavior of the window protocol sender causes cyclic
queue activity all along the path. Queue activity is
found to have three distinct phases. The initial phase
describes queue build up behavior. This phase begins
with the arrival of the first bit of the initial packet
at any queue. Packets arrive at a rate controlled by
the previous upstream link. Queue build up continues
until packet arrivals from the previous upstream
resource temporarily stop. The second phase describes
a short pause until arrivals begin again. Thus, any
queue built up during the first phase begins draining.
The third phase consists of a queue finding a cyclic
pattern of packet arrivals from a previous resource.
Solutions for the occurrence of each phase can be
obtained through an iterative process. This process
involves solving for the same information in the
previous resource queues back to the base case of the
window protocol sender. Additional results show that the
behavior of window protocols often forces large queues
to appear near a window protocol sender during initial
protocol activity. At each queue, the maximum queue
size occurs at or right after queue depletion of the
previous upstream resource. Thus queues always drain
and appear further ``downstream'' as data transfer
continues. We refer to this activity as queue
migration. The speed at which a particular queue drains
is called the Queue Drain Rate. This rate is shown to
be a function of the speed of the resource the queue is
feeding and of the bottleneck speed. Queues can be
considered migrating at the Queue Drain Rates of the
various resources. Queue migration continues until the
bottleneck is reached. At this point in time, if the
window size is large enough, a large queue can be (and
often is) permanently maintained at the bottleneck.
This behavior agrees with similar behavior described by
finite population closed queueing systems. These
systems observe that at steady state you are most
likely to find a queue in front of the bottleneck
resource. Steady state begins once sender transmission
becomes cyclic at the bottleneck rate. The queue
migration process begins at this same time. One
intriguing result is that once the sender enters steady
state, the total queue time along the path for the
request packets is an invariant. This is true even
while queue migration is still occurring. It is
interesting to note that despite the widespread use
of window protocols, no deterministic analysis of their
queueing behavior seems to exist. Yet, the approach
taken in this research appears very promising. Because
deterministic dependencies are most evident when a load
exists, this deterministic analysis technique also
allows the accurate determination of queueing activity
during significant network load, a time network
designers consider most critical. The results are
applicable to the window protocol mechanisms for
congestion and flow control in SNA, and TCP.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
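
A small deterministic recurrence captures the flavor of the analysis
sketched above: packet i leaves resource j once both resource j is free
and packet i has left resource j-1, and a new container is released as
older packets drain from the path. The Python sketch below additionally
assumes that acknowledgements return instantly, which the paper does
not, so its numbers are illustrative only; it is meant merely to show
queue build-up near the sender and a persistent queue at the bottleneck.

# Deterministic window-protocol queue sketch (greedy sender, equal-size
# packets, no loss, work-conserving resources, instantaneous hand-off;
# instantaneous acks are an extra simplifying assumption).
SERVICE = [1.0, 2.0, 4.0]   # per-packet delay of each resource; last is the bottleneck
WINDOW = 6                  # containers available to the sender
PACKETS = 40

def simulate(service, window, packets):
    """arr[i][j] / dep[i][j]: time packet i arrives at / leaves resource j."""
    m = len(service)
    arr = [[0.0] * m for _ in range(packets)]
    dep = [[0.0] * m for _ in range(packets)]
    for i in range(packets):
        # The container for packet i frees when packet i-window leaves the path.
        arr[i][0] = 0.0 if i < window else dep[i - window][m - 1]
        for j in range(m):
            busy_until = dep[i - 1][j] if i > 0 else 0.0
            dep[i][j] = max(arr[i][j], busy_until) + service[j]
            if j + 1 < m:
                arr[i][j + 1] = dep[i][j]   # instantaneous hand-off
    return arr, dep

def max_queue(arr, dep, j):
    """Largest number of packets simultaneously present at resource j."""
    times = [arr[i][j] for i in range(len(arr))]
    return max(sum(1 for i in range(len(arr)) if arr[i][j] <= t < dep[i][j])
               for t in times)

arr, dep = simulate(SERVICE, WINDOW, PACKETS)
for j, s in enumerate(SERVICE):
    print(f"resource {j} (service time {s}): max queue = {max_queue(arr, dep, j)}")
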
@Article{Garofalakis:1990:PMI,
author = "John D. Garofalakis and Paul G. Spirakis",
title = "The performance of multistage interconnection networks
with finite buffers",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "263--264",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98779",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multistage interconnection networks with crossbar
switches are a major component of parallel machines. In
this paper we analyze Banyan networks of $k$ by $k$
switches and with finite buffers. The exact solution of
the steady state distribution of the first stage is
derived in the situation where packets are lost when
they encounter a full buffer (Assumption A). The
solution is a linear combination of $ k - 1$
geometrics. We use this to get an approximation for the
steady state distributions in the second stage and
beyond. As a side effect, the infinite buffer case is
solved, confirming known results. Our results are
validated by extensive simulations. An alternate
situation of networks where full buffers may block
previous switches is also analyzed through an
approximation technique (Assumption B).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vasilakos:1990:AWF,
author = "Athanasios V. Vasilakos and Christos A. Moschonas and
Constantinos T. Paximadis",
title = "Adaptive window flow control and learning algorithms
for adaptive routing in data networks",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "265--266",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98780",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a new adaptive flow control algorithm
together with learning routing algorithms. The key
performance measure in both algorithms is packet delay.
Window adjustment and route selection are based on
delay measurements. Simulation results have shown the
superiority of the new scheme over existing
algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nussbaum:1990:MCS,
author = "Daniel Nussbaum and Ingmar Vuong-Adlerberg and Anant
Agarwal",
title = "Modeling a circuit switched multiprocessor
interconnect",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "267--269",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98781",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaither:1990:ER,
author = "Blaine D. Gaither",
title = "{Editor}'s readings",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "25--26",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.1045579",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vance:1990:ARM,
author = "R. E. Vance",
title = "Article review: {`A message-based approach to discrete
event simulation' by R. L. Bagrodia, K. M. Chandy, and
J. Misra. IEEE Trans. Softw. Eng. SE-13, 6 (June
1987)}",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "27--27",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.1045580",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As a service to our readers, PER has reached an
agreement to reprint reviews of books and papers on
simulation and modeling that originally appeared in ACM
{\em Computing Reviews}. CR is a monthly journal that
publishes critical reviews on a broad range of
computing subjects including simulation and modeling.
As an ACM member, you can subscribe to CR by writing to
ACM Headquarters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Allen:1990:AMS,
author = "Arnold O. Allen and Gary Hynes",
title = "Approximate {MVA} solutions with fixed throughput
classes",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "31--40",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.101321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Transaction (open) type workloads are often used in
approximating computer system workloads which are
actually closed because open workloads provide
reasonable estimates in many cases and their solutions
are straightforward. We have found that their use can
distort the results for many workloads in a multiclass
queueing network model of a computer system. We have
replaced transaction workloads with what we call {\em
fixed class\/} workloads. We present an approximate
algorithm based on MVA that represents a class with a
given throughput by a corresponding terminal or batch
class, which we call a fixed class workload. We solve
for the closed population required to deliver the
requested throughput. We also present techniques for
overcoming problems encountered in the solution of some
fixed class models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
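
The core idea above, representing a class with a requested throughput by
a closed class and solving for the population that delivers that
throughput, can be illustrated with single-class exact MVA. The sketch
below is not the authors' multiclass approximate algorithm; the service
demands and target rate are assumptions chosen only for illustration.

# Single-class exact MVA plus a search for the closed population that
# reaches a requested ("fixed") throughput.
def mva_throughput(demands, n_customers, think_time=0.0):
    """Exact single-class MVA for a closed network of queueing centers."""
    queue = [0.0] * len(demands)
    x = 0.0
    for n in range(1, n_customers + 1):
        resid = [d * (1.0 + q) for d, q in zip(demands, queue)]
        x = n / (think_time + sum(resid))
        queue = [x * r for r in resid]
    return x

def population_for_throughput(demands, target_x, think_time=0.0, max_n=10_000):
    """Smallest integer population whose MVA throughput reaches target_x."""
    for n in range(1, max_n + 1):
        if mva_throughput(demands, n, think_time) >= target_x:
            return n
    raise ValueError("target throughput exceeds the network's capacity")

demands = [0.05, 0.03, 0.02]   # service demands at a CPU and two disks, seconds (assumed)
print(population_for_throughput(demands, target_x=15.0))   # jobs needed for 15 jobs/sec
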
@Article{McGehearty:1990:COPb,
author = "Patrick McGehearty",
title = "Challenges in obtaining peak parallel performance with
a {Convex C240}, a parallel vector processor",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "41--47",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.101322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The behavior of the Linpack $ 300 \times 300 $
benchmark is examined in the context of a parallel
vector machine architecture. Detailed evaluation is
performed with respect to the Convex C240. Issues
relating to algorithm design and system characteristics
are discussed in the context of the Linpack
implementation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaither:1990:SVP,
author = "Blaine Gaither",
title = "Scientific visualization of performance data:
evaluation of {DV-Draw}",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "48--53",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.101323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This report discusses the attributes of the {\em
DV-Draw\/} package from the VI Corporation of Amherst,
Massachusetts. {\em DV-Draw\/} is a scientific
visualization package which is part of a larger package
called DataViews. The requirements for visualization
software in performance evaluation are identified.
Applying {\em DV-Draw\/} to animate the output of an
architectural model was successful.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Taheri:1990:ANN,
author = "H. Reza Taheri",
title = "An analysis of the {Neal Nelson Business Benchmark}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "13--18",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.122236",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Neal Nelson Business Benchmark is an
industry-standard benchmark which is used to evaluate
the performance of UNIX computer systems. The Business
Benchmark purports to give the user an idea of the
performance of the machine under real business UNIX
workloads. In this article we will show that the
Business Benchmark stresses few components of the
system with very simple tests. As such, it is better
suited as a component-level benchmark for users who want
to focus on the performance of a particular aspect of
the system, rather than as a system-level UNIX benchmark
representative of commercial applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keller:1990:SBC,
author = "Tom Keller",
title = "{SPEC} benchmarks and competitive results",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "19--20",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.122237",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In less than a year since its introduction, the System
Performance Evaluation Cooperative (SPEC (TM))
benchmarks have established themselves as an important
and widely distributed benchmark suite for engineering
and scientific workstations, displacing the old
standards Dhrystone, Linpack, and Whetstone. This is
because most workstation vendors support SPEC and have
participated in developing both the benchmarks and a
benchmarking methodology that overcome many of the
failings of the old benchmark standards. SPEC's strong
endorsement by the EE TIMES newsmagazine helps ensure
that SPEC results are heavily publicized in the industry.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRCa,
author = "David Finkel",
title = "Book review: {`Computer Systems Performance Management
and Capacity Planning' by J. Cady and B. Howarth
(Prentice-Hall, 1990)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "21--21",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "I reviewed this book from the author's manuscript. The
book is now being typeset, and the author tells me that
it is due to appear in February, 1991.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRA,
author = "David Finkel",
title = "Book review: {`The Art of Computer Systems Performance
Analysis' by R. Jain (Wiley-Interscience, 1991)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "21--22",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book grew out of the author's experience teaching
a course in performance evaluation to final year
undergraduates. This heritage of undergraduate origins
shows up throughout the book. The mathematics is
presented very gently. For example, several complex
formulas are written out twice, once without summation
notation (i.e., $a_1 + a_2 + \cdots + a_n$) and
then again with summation notation ($\Sigma a_i$).
There are numerous worked out examples, and a wide
range of exercises, from simple ones that just use the
formulas in the text to more challenging exercises.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRCb,
author = "David Finkel",
title = "Book review: {`Computer and Communication Systems
Performance Modelling' by Peter J. B. King (Prentice
Hall 1990)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "22--22",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The preface to this book sets out the author's thesis,
that `computer science students tend to view
performance analysis as a practical discipline \ldots{}
[and] often prefer to rely on physical insight and
intuition rather than formal insights.' Accordingly,
the author's approach is to emphasize useful methods
and applications, rather than formal mathematical
derivations. The background expected of students is
basic operating systems, machine architecture, data
structures, and elementary calculus and basic
probability theory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRQ,
author = "David Finkel",
title = "Book review: {`Quantitative Analysis of Computer
Systems' by C. H. C. Leung (Wiley, 1988)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "22--23",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is, according to the author, `designed for
final year undergraduates in computer sciences, or
conversion course MSc students.' It presumes some
background in elementary probability theory, although
this material is reviewed early in the book.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRF,
author = "David Finkel",
title = "Book review: {`Fundamentals of Performance Modeling'
by M. K. Molloy (Macmillan, 1989)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "23--23",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is intended, according to the author's
preface, for the undergraduate computer science student
without an extensive mathematical background. The book
itself provides the mathematical background, through a
chapter on probability theory, a chapter on transform
theory, and an appendix on mathematical formulas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Saavedra-Barrera:1990:MCB,
author = "Rafael Saavedra-Barrera and Alan J. Smith and Eugene
Miya",
title = "Machine Characterization Based on an Abstract
High-level Language Machine",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "24--24",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045575",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A {\em linear performance model\/} decomposes program
execution into $n$ distinct operations, such as adds,
multiplies, loads, and stores. A program is
characterized as an $n$-vector of operation counts, and
a machine is characterized as an $n$-vector of
operation times. The dot-product of the two is the time
required for the machine to execute the program. A
linear performance model has several uses: (1) once every
machine and program is characterized, the performance of
each program on each machine can be predicted without
having to run them; (2) two machines (or programs) can be
compared by comparing corresponding elements of their
parameter vectors, and the influence of individual
parameters on overall performance can be used to predict
the effect of design changes; and (3) machines (and
programs) can be classified by the
similarity of their parameter vectors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
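
The linear model described above reduces to a dot product of an
operation-count vector and an operation-time vector, as the sketch below
shows. The operation names, counts, and times are invented for
illustration and are not taken from the paper.

# Linear performance model: predicted time = counts . times.
ops = ["add", "mul", "load", "store"]

program = {"add": 4.0e8, "mul": 1.5e8, "load": 6.0e8, "store": 2.0e8}   # operation counts
machine = {"add": 2e-9, "mul": 5e-9, "load": 4e-9, "store": 4e-9}       # seconds per operation

predicted = sum(program[op] * machine[op] for op in ops)
print(f"predicted run time: {predicted:.3f} s")

# Comparing two machines, per the abstract: element-wise ratios of their
# parameter vectors show which operations one executes faster.
machine_b = {"add": 1e-9, "mul": 2e-9, "load": 5e-9, "store": 5e-9}
ratios = {op: machine[op] / machine_b[op] for op in ops}
print(ratios)
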
@Article{Panwar:1990:OSP,
author = "Shivendra S. Panwar and Don Towsley and Jack K. Wolf",
title = "Optimal scheduling policies for a class of queues with
customer deadlines to the beginning of service",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "25--25",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045576",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper treats the problem of queueing packets
which have an assigned expiration date. If a packet
does not begin processing within the specified time
limit, it is discarded as useless. The primary example
is transmission of voice or video frames over a
packet-switched network, where the illusion of realtime
transmission is to be maintained. The occasional loss
of a packet will reduce transmission quality, but the
voice or video reception should remain intelligible.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
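
As a concrete, deliberately naive illustration of the discipline
described above, the sketch below simulates a single FCFS server that
discards any packet whose wait to the beginning of service would exceed
its deadline. It is not the paper's optimal scheduling policy, and the
arrival rate, service rate, and deadline are assumptions.

# FCFS queue with deadlines to the beginning of service (illustrative).
import random

random.seed(1)
ARRIVAL_RATE, SERVICE_RATE, DEADLINE = 0.9, 1.0, 2.0   # assumed parameters
N = 100_000

t = 0.0                 # current arrival time
server_free = 0.0       # time the server next becomes idle
dropped = 0
for _ in range(N):
    t += random.expovariate(ARRIVAL_RATE)
    start = max(t, server_free)          # when this packet could begin service
    if start - t > DEADLINE:
        dropped += 1                     # missed its deadline to start service
        continue
    server_free = start + random.expovariate(SERVICE_RATE)

print(f"fraction discarded: {dropped / N:.3%}")
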
@Article{Tokuda:1990:RTM,
author = "Hideyuki Tokuda and Makoto Kotera and Clifford E.
Mercer",
title = "A Real-Time Monitor for a Distributed Real-Time
Operating System",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "26--26",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed real-time systems are difficult to
develop. External events occur independently of
internal control, and the real-time system must be
designed to accommodate them correctly. Two problems
emerging from this are the {\em logical\/} correctness
and the {\em timing\/} correctness of the system
software: not only must it process the real-time events
correctly, but the program timing must prevent the task
of processing from interfering with the task of
monitoring.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thiebaut:1990:FDC,
author = "Dominique Thiebaut",
title = "On the Fractal Dimension of Computer Programs and its
Application to the Prediction of the Cache Miss Ratio",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "41--41",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045578",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fractals are complex, nonsmooth functions with simple
recursive characterizations. Many complex phenomena
resemble fractals, and may therefore be analyzable.
Intuitively the repetition structures of a computer
program should produce patterns of fractal behavior.
This paper shows fractal characteristics of cache-miss
and memory-reference patterns across four program
traces. It should be interesting to those wanting a
simple classification of program behavior; cache
designers should use more exact methods, such as
trace-driven simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ponder:1990:PVA,
author = "Carl Ponder",
title = "Performance variation across benchmark suites",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "42--48",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.122238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance ratio between two systems tends to
vary across different benchmarks. Here we study this
variation as a `signature' or `fingerprint' of the
systems under consideration. This `fingerprint' can be
used to guess the performance of programs not
represented in a benchmark suite, assess the breadth
and credibility of the benchmark suite, and infer
details of the system design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
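
The "fingerprint" notion above is simply the vector of per-benchmark
performance ratios between two systems, together with some measure of
its spread. The sketch below computes such a vector for invented
benchmark times; the geometric mean and log-ratio spread are common
summaries, not necessarily the statistics used in the paper.

# Performance fingerprint: per-benchmark ratios plus a spread measure.
import math
from statistics import stdev

times_a = {"compress": 12.0, "fft": 30.0, "sort": 8.0, "matrix": 45.0}   # seconds on system A (assumed)
times_b = {"compress": 10.0, "fft": 15.0, "sort": 9.0, "matrix": 20.0}   # seconds on system B (assumed)

ratios = {name: times_a[name] / times_b[name] for name in times_a}
logs = [math.log(r) for r in ratios.values()]
geo_mean = math.exp(sum(logs) / len(logs))

print("fingerprint:", {k: round(v, 2) for k, v in ratios.items()})
print("geometric-mean speedup of B over A:", round(geo_mean, 2))
print("spread (stdev of log ratios):", round(stdev(logs), 2))
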
@Article{Finkel:1991:BRMa,
author = "David Finkel",
title = "Book review: {`Multiprocessor Performance' by Erol
Gelenbe (John Wiley \& Sons, 1989)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "9--9",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book begins with a survey of the different
approaches to parallelizing computation: vector or
array processors, loosely-coupled distributed systems,
and multiprocessor systems. The author then states his
principal thesis, that only multiprocessor systems
offer the potential for unlimited processing power in
the machines of the future. Since the impetus for
designing multiprocessor systems is to improve
performance, it is obviously crucial to evaluate the
performance of these systems. This is the task set out
for the rest of the book.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRPb,
author = "David Finkel",
title = "Book review: {`Performance Analysis of Transaction
Processing Systems' by Wilbur H. Highleyman (Prentice
Hall, 1989)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "10--10",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurement of computer system performance usually
occupies at most a chapter or two of performance
evaluation texts, but here it is given the book-length
treatment it deserves. The author begins the book with
an introductory chapter discussing the purposes and
goals of performance measurement, which of course
varies from one study to another. He then surveys the
kinds of measurement tools available, and sets out his
philosophy of measurement methodology (which includes
references to Aristotle and the Renaissance world
view), which is expanded in a later chapter.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRPa,
author = "David Finkel",
title = "Book review: {`Performance Measurement of Computer
Systems' by Phillip McKerrow (Addison-Wesley 1988)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "10--11",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As its title indicates, this book has the very
specific purpose of applying performance evaluation
tools to the study of on-line transaction processing
systems. The book provides both an overview of the
relevant mathematical methods from performance
evaluation, and an application of those methods to
transaction processing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRMb,
author = "David Finkel",
title = "Book review: {`Multiple Access Protocols: Performance
and Analysis' by Raphael Rom and Moshe Sidi
(Springer-Verlag, 1990)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "11--11",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The author's central thesis is that responsiveness, or
performance, is crucial to the success of software
systems. Thus performance considerations must be a part
of all stages of software development, starting from
the earliest stages of design. The approach uses a
combination of straightforward data collection and
analysis, and mathematically sophisticated techniques.
The mathematical treatment is entirely self-contained,
and no extensive mathematical background is assumed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRPc,
author = "David Finkel",
title = "Book review: {`Performance Engineering of Software
Systems' by Connie U. Smith (Addison-Wesley, 1990)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "11--12",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This books concentrates on the important topic of the
performance of computer communications networks,
particularly on the performance of the multiple access
protocols they use. The approach is mathematical, and
the authors clearly state the mathematical
prerequisities they expect from the reader: probability
theory, stochastic processes in general, and Markov
chains and the M/G/1 queue in particular. The
mathematical prerequisities allow the authors to do a
careful and complete job of deriving the results they
need. Each chapter ends with a set of challenging
exercises, for those who wish to use the book as a
text, and the book ends with an extensive
bibliography.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Johari:1991:POH,
author = "Shyam Johari",
title = "Performance objectives --- how to define them",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "18--19",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.122290",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The successful development of a product demands that
product performance objectives be clearly defined and
agreed to as early as possible during the product
development cycle, typically during the product
requirements phase. Unless clearly defined and
uniformly understood, performance objectives can be
subject to varied interpretation as the product nears
completion. Why? Because all parties involved (e.g., Product
Marketing, Product Management, and Product Development)
have their own performance perspective. How to
clearly define product performance objectives is
the thrust of this note.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ponder:1991:BS,
author = "Carl G. Ponder",
title = "Benchmark semantics",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "20--24",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.122291",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Be careful when interpreting benchmark measurements
that compare two languages or two implementations of
the same language. A program expressed in two different
languages rarely computes the exact same function in
both cases. The same goes for a program run on two
different implementations of the same language. The
implementation details ultimately affect the language
semantics as well as the benchmark performance. Here
are some simple examples of this effect.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
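A concrete instance of the effect the abstract describes, namely that the `same' benchmark may compute a different function under two implementations, is floating-point reassociation. The example below is ours, not one from the paper.

    # Sketch: floating-point addition is not associative, so implementations
    # that evaluate the "same" reduction in different orders compute slightly
    # different functions, and a benchmark comparing them is not comparing
    # identical computations.
    x, y, z = 0.1, 0.2, 0.3

    left  = (x + y) + z    # order one implementation might use
    right = x + (y + z)    # order another might use after reassociation

    print(left, right, left == right)   # 0.6000000000000001 0.6 False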
@Article{Cabrera:1991:TSS,
author = "Luis-Felipe Cabrera",
title = "Technical summary of the {Second IEEE Workshop on
Workstation Operating Systems}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "25--32",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.122292",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The broad spectrum of universities, industrial
research laboratories, and computer companies
represented at the Second IEEE Workshop on Workstation
Operating Systems provided a rich snapshot of current
activities in operating systems. There were
representatives of 19 operating system research
projects among the participants and several from
commercial offerings. The attendees came from seven
countries on three continents: North America, Europe,
and South America. Since the last Workshop in 1987,
there have been more advances in hardware than in
software functions. Software standards continue to
emerge in the areas of operating system interfaces,
page description languages, window management
interfaces, and communication protocols. New software
applications exist in the areas of multimedia and
multi-node computing. Object-oriented technology is
already present in running systems and gaining
importance. The areas that the participants perceived
needing most future work were operating system
abstractions, workstation operation, system
responsiveness, input output, network services,
management of clusters of workstations, and failure
handling. While processor speeds, main memory access
speeds, memory density, and secondary storage capacity
continue to increase fast, disk seek times have
decreased only slightly, and the bandwidth of most
local-area networks has not increased at all. FDDI
networks are just beginning to be deployed. The
software is adjusting to this hardware scenario by
using caching at multiple levels of the systems. In the
last two years large main memories at individual
computing nodes and multi-node computer installations
have become common. It is expected that most future
computing nodes will have substantial local storage and
that high-bandwidth networks will enable the support of
continuous media like voice and video. Input output, to
disks, to networks, and to user-oriented devices, is
expected to become the central problem in future
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Melliar-Smith:1991:PAB,
author = "P. M. Melliar-Smith and Louise E. Moser",
title = "Performance analysis of a broadcast communications
protocol",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "1--10",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107973",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Trans protocol is a communications protocol that
exploits the broadcast capability of local area
networks. Classical Markov models and queueing theory
are used to analyze the performance of components of
this protocol, but cannot be applied directly to
determine the performance of the protocol as a whole.
Instead, Laplace transforms of the distributions for
the components are first derived and then combined into
a transform for the entire protocol. This transform is
evaluated by contour integration to yield the latency
for the protocol.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
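The transform-composition step sketched in the abstract rests on the standard fact that the Laplace--Stieltjes transform of a sum of independent delays is the product of the component transforms. The LaTeX note below restates that step with generic components; the decomposition shown is illustrative, not the paper's.

    % Minimal sketch of the transform-composition step (generic components).
    \documentclass{article}
    \begin{document}
    Let $D = D_1 + D_2 + \cdots + D_k$ be the protocol latency, where the $D_i$
    are independent component delays with Laplace--Stieltjes transforms
    $\phi_i(s) = E[e^{-s D_i}]$. Then
    \[
      \phi_D(s) = E[e^{-s D}] = \prod_{i=1}^{k} \phi_i(s),
    \]
    and the latency distribution is recovered by inverting $\phi_D$, e.g.\ via
    the Bromwich contour integral
    \[
      f_D(t) = \frac{1}{2\pi i}\int_{c - i\infty}^{c + i\infty}
               e^{s t}\, \phi_D(s)\, ds .
    \]
    \end{document}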
@Article{Danzig:1991:AMO,
author = "Peter B. Danzig",
title = "An analytical model of operating system protocol
processing including effects of multiprogramming",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "11--20",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107974",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We model the limited buffer queueing process that
occurs within the UNIX operating system's protocol
processing layers. Our model accounts for the effects
of user process multiprogramming and preemptive,
priority scheduling of interrupt, operating system, and
user tasks. After developing the model, we use it to
predict message loss that occurs during local area
network (LAN) multicast. Our service time model can be
applied to window- and rate-based stream flow control.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harinarayan:1991:LSL,
author = "Venkatesh Harinarayan and Leonard Kleinrock",
title = "Load sharing in limited access distributed systems",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "21--30",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107975",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we examine dynamic load sharing in
limited access distributed systems. In this class of
distributed systems not all servers are accessible to
all sources, and there exist many different
accessibility topologies. We focus our attention on the
ring topology and provide an analytic model to derive
the approximate mean waiting time (our metric of
performance). We then consider other limited access
topologies and find that rather different
interconnection patterns give similar performance
measurements. We conjecture that the number of servers
accessible to a source is the parameter with the
greatest performance impact, in a limited access
topology with load sharing. We also introduce another
variable called diversity that is indicative of the
degree of load sharing and speculate that performance
is reasonably insensitive to diversity so long as it is
non-zero. Using these conjectures we show how a
reasonable estimate of the mean waiting time can be
analytically derived in many limited access
topologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:1991:SPA,
author = "Tein-Hsiang Lin and Wernhuar Tarng",
title = "Scheduling periodic and aperiodic tasks in hard
real-time computing systems",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "31--38",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107976",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling periodic and aperiodic tasks to meet their
time constraints has been an important issue in the
design of real-time computing systems. Usually, the
task scheduling algorithms in such systems must satisfy
the deadlines of periodic tasks and provide fast
response times for aperiodic tasks. A simple and
efficient approach to scheduling real-time tasks is the
use of a periodic server in a static preemptive
scheduling algorithm. Periodic tasks, including the
server, are scheduled {\em a priori\/} to meet their
deadlines according to the knowledge of their periods
and computation times. The scheduling of aperiodic
tasks is then managed by the periodic server during its
service time. In this paper, a new scheduling algorithm
is proposed. The new algorithm creates a periodic
server which will have the highest priority but not
necessarily the shortest period. The server is
suspended to reduce the overhead if there are no
aperiodic tasks waiting, and is activated immediately
upon the arrival of the next aperiodic task. Once
activated, the server performs its duty periodically
until all waiting aperiodic tasks are completed. For a
set of tasks scheduled by this algorithm, the deadlines
of periodic tasks are guaranteed by a deterministic
feasibility check, and the mean response time of
aperiodic tasks is estimated using a queueing model.
Based on the analytical results, we can determine the
period and service time of the server producing the
minimum mean response time for aperiodic tasks. The
analytical results are compared with simulation results
to demonstrate the correctness of our model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
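The `deterministic feasibility check' mentioned for the periodic tasks can be illustrated with the classical Liu--Layland rate-monotonic utilization bound; this is a standard sufficient test, not necessarily the exact check used in the paper, and the task set below is hypothetical.

    # Sketch: rate-monotonic utilization-bound feasibility check for periodic
    # tasks (the periodic server is treated as one more task). Classical
    # Liu-Layland test, shown only as an illustration.
    def rm_feasible(tasks):
        """tasks: list of (computation_time, period) pairs.
        True if total utilization is below the Liu-Layland bound, a
        sufficient condition for meeting all deadlines under
        rate-monotonic priorities."""
        n = len(tasks)
        utilization = sum(c / t for c, t in tasks)
        return utilization <= n * (2 ** (1.0 / n) - 1)

    # Hypothetical (C, T) pairs; the last entry plays the periodic server.
    tasks = [(1.0, 8.0), (2.0, 16.0), (4.0, 32.0), (1.0, 10.0)]
    print(rm_feasible(tasks))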
@Article{Berry:1991:ADC,
author = "Robert Berry and Joseph Hellerstein",
title = "An approach to detecting changes in the factors
affecting the performance of computer systems",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "39--49",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107977",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Resolving intermittent performance problems in
computer systems is made easier by pinpointing when a
change occurs in the system's performance-determining
factors (e.g., workload composition, configuration).
Since we often lack direct measurements of performance
factors, this paper presents a procedure for indirectly
detecting such changes by analyzing performance
characteristics (e.g., response times, queue lengths).
Our procedure employs a widely used clustering
algorithm to identify candidate change points (the
times at which performance factors change), and a newly
developed statistical test (based on an AR(1) time
series model) to determine the significance of
candidate change points. We evaluate our procedure by
using simulations of M/M/1, FCFS queueing systems and
by applying our procedure to measurements of a
mainframe computer system at a large telephone company.
These evaluations suggest that our procedure is
effective in practice, especially for larger sample
sizes and smaller utilizations. We further conclude
that indirectly detecting changes in performance
factors appears to be inherently difficult in that the
sensitivity of a detection procedure depends on the
magnitude of the change in performance characteristics,
which often has a nonlinear relationship with the
change in performance factors. Thus, a change in
performance factors (e.g., increased service times) may
be more readily detected in some situations (e.g., very
low or very high utilizations) than in others (e.g.,
moderate utilizations). A key insight here is that the
sensitivity of the detection procedure can be improved
by choosing appropriate measures of performance
characteristics. For example, our experience and
analysis suggest that queue lengths can be more
sensitive than response times to changes in arrival
rates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
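The procedure above pairs clustering (to nominate candidate change points) with an AR(1)-based significance test. The sketch below illustrates only the second half in a heavily simplified form: a standardized difference of means around a candidate point, with the effective sample size deflated by a lag-1 autocorrelation estimate. It is not the authors' test, and the series is synthetic.

    # Rough sketch: score a candidate change point in a performance series,
    # crudely adjusting for AR(1)-style dependence. Synthetic data.
    import math
    import random

    random.seed(1)
    series = [random.gauss(10, 1) for _ in range(200)] + \
             [random.gauss(12, 1) for _ in range(200)]   # shift at index 200

    def lag1_autocorr(x):
        n = len(x)
        m = sum(x) / n
        num = sum((x[i] - m) * (x[i + 1] - m) for i in range(n - 1))
        den = sum((xi - m) ** 2 for xi in x)
        return num / den

    def change_statistic(x, k):
        """Standardized mean difference before/after index k, with the
        effective sample size deflated for positive autocorrelation."""
        rho = lag1_autocorr(x)

        def mean_and_var_of_mean(seg):
            n = len(seg)
            m = sum(seg) / n
            v = sum((s - m) ** 2 for s in seg) / (n - 1)
            n_eff = max(n * (1 - rho) / (1 + rho), 1.0)
            return m, v / n_eff

        ma, va = mean_and_var_of_mean(x[:k])
        mb, vb = mean_and_var_of_mean(x[k:])
        return abs(ma - mb) / math.sqrt(va + vb)

    print(change_statistic(series, 200))   # large value -> likely change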
@Article{Bodnarchuk:1991:SWM,
author = "Robert Bodnarchuk and Richard Bunt",
title = "A synthetic workload model for a distributed system
file server",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "50--59",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107978",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The accuracy of the results of any performance study
depends largely on the quality of the workload model
driving it. Not surprisingly then, workload modelling
is an area of great interest to those involved in the
study of computer system performance. While a
significant amount of research has focussed on the
modelling of workloads in a centralized computer
system, little has been done in the context of
distributed systems. The goal of this research was to
model the workload of a distributed system file server
in a UNIX/NFS environment. The resulting model is
distribution-driven and generates workload components
in real time. It runs externally to the system it
drives, thus eliminating any interference at the
server. The model was validated for different workload
intensities to ensure that it provides the flexibility
to vary the workload intensity without loss of
accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merchant:1991:MCA,
author = "Arif Merchant",
title = "A {Markov} chain approximation for the analysis of
{Banyan} networks",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "60--67",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107979",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper analyzes the delay suffered by messages in
a clocked, packet-switched, square Banyan network with
$ k \times k $ output-buffered switches by
approximating the flow processes in the network with
Markov chains. We recursively approximate the departure
process of buffers of the $ n^{\rm th} $ stage in terms
of that at the $ (n - 1)^{\rm st} $ stage. We show how to
construct the transition matrix for the Markov chain at
each stage of the network and how to solve for the
stationary distribution of the delay in the queues of
that stage. The analytical results are compared with
simulation results for several cases. Finally, we give
a method based on this approximation and the technique
of {\em coupling\/} to compute upper bounds on the time
for the system to approach steady state.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
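The stage-by-stage analysis above builds a transition matrix for the buffer occupancy at each stage and solves for its stationary distribution. The generic sketch below shows only that last, routine step for a small made-up chain; it is not derived from a Banyan switch model.

    # Sketch: stationary distribution of a small discrete-time Markov chain,
    # obtained by solving pi P = pi together with sum(pi) = 1.
    # The 3-state transition matrix is illustrative only.
    import numpy as np

    P = np.array([[0.7, 0.3, 0.0],
                  [0.2, 0.5, 0.3],
                  [0.0, 0.4, 0.6]])

    def stationary(P):
        n = P.shape[0]
        A = np.vstack([P.T - np.eye(n), np.ones(n)])   # (P^T - I) pi = 0
        b = np.zeros(n + 1)
        b[-1] = 1.0                                     # normalization row
        pi, *_ = np.linalg.lstsq(A, b, rcond=None)
        return pi

    print(stationary(P))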
@Article{Lin:1991:PAF,
author = "T. Lin and L. Kleinrock",
title = "Performance analysis of finite-buffered multistage
interconnection networks with a general traffic
pattern",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "68--78",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107980",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present an analytical model for evaluating the
performance of finite-buffered packet switching
multistage interconnection networks using blocking
switches under any general traffic pattern. Most of the
previous research work has assumed unbuffered, single
buffer or infinite buffer cases, and all of them
assumed that every processing element had the same
traffic pattern (either a uniform traffic pattern or a
specific hot spot pattern). However, their models
cannot be applied very generally. There is a need for
an analytical model to evaluate the performance under
more general conditions. We first present a description
of a decomposition {\&} iteration model which we
propose for a specific hot spot pattern. This model is
then extended to handle more general traffic patterns
using a transformation method. For an even more general
traffic condition where each processing element can
have its own traffic pattern, we propose a
superposition method to be used with the iteration
model and the transformation method. We can extend the
model to account for processing elements having
different input rates by adding weighting factors in
the analytical model. An approximation method is also
proposed to refine the analytical model to account for
the memory characteristic of a blocking switch which
causes persistent blocking of packets contending for
the same output ports. The analytical model is used to
evaluate the uniform traffic pattern and a very general
traffic pattern `EFOS'. Comparison with simulation
indicates that the analytical model is very accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wood:1991:MET,
author = "David A. Wood and Mark D. Hill and R. E. Kessler",
title = "A model for estimating trace-sample miss ratios",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "79--89",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107981",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Unknown references, also known as cold-start misses,
arise during trace-driven simulation of uniprocessor
caches because of the unknown initial conditions.
Accurately estimating the miss ratio of unknown
references, denoted by $ \mu $, is particularly important
when simulating large caches with short trace samples,
since many references may be unknown. In this paper we
make three contributions regarding $ \mu $. First, we
provide empirical evidence that $ \mu $ is much larger than
the overall miss ratio (e.g., 0.40 vs. 0.02). Prior
work suggests that they should be the same. Second, we
develop a model that explains our empirical results for
long trace samples. In our model, each block frame is
either {\em live}, if its next reference will hit, or
{\em dead}, if its next reference will miss. We model each
block frame as an alternating renewal process, and use
the renewal-reward theorem to show that $ \mu $ is simply
the fraction of time block frames are dead. Finally, we
extend the model to handle short trace samples and use
it to develop several estimators of $ \mu $. Trace-driven
simulation results show these estimators lead to better
estimates of overall miss ratios than do previous
methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
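The central identity in the model above, that $ \mu $ is the long-run fraction of time a block frame is dead, is an instance of the renewal-reward theorem applied to each frame's alternating live/dead cycle. The LaTeX note below restates it with generic symbols for the mean live and dead durations; the symbols are ours, not necessarily the paper's.

    % Sketch of the renewal-reward step behind the unknown-reference miss ratio.
    \documentclass{article}
    \begin{document}
    Model a block frame as an alternating renewal process with mean live
    duration $E[L]$ (next reference hits) and mean dead duration $E[D]$
    (next reference misses). Taking time spent dead as the reward, the
    renewal-reward theorem gives the long-run fraction of time the frame is
    dead,
    \[
      \mu \;=\; \frac{E[D]}{E[L] + E[D]},
    \]
    which is identified with the miss ratio of unknown references and can be
    far larger than the overall miss ratio.
    \end{document}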
@Article{Chiang:1991:EMV,
author = "Mee-Chow Chiang and Gurindar S. Sohi",
title = "Experience with mean value analysis model for
evaluating shared bus, throughput-oriented
multiprocessors",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "90--100",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107982",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We report on our experience with the accuracy of mean
value analysis analytical models for evaluating shared
bus multiprocessors operating in a throughput-oriented
environment. Having developed separate models for
multiprocessors with circuit switched and split
transaction, pipelined (packet switched) buses, we
compare the results of the models with those of an
actual trace-driven simulation for 5,376 multiprocessor
configurations. We find that the analytical models are
accurate in predicting the individual processor
throughputs and partial bus utilizations. For processor
throughput, the difference between the results of the
models and simulation are within 1\% for 75\% of the
cases and within 3\% in 94\% of all cases. For partial
bus utilization the model results are with 1\% of
simulation results in 70\% of all cases and within 3\%
in 92\% of all cases. The models are less accurate in
predicting cache miss latency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
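For background on the machinery the abstract refers to, the sketch below is the textbook exact Mean Value Analysis recursion for a single-class closed product-form network. The authors' models are specialized to circuit-switched and split-transaction buses; this generic version, with invented service demands, is only a reminder of the underlying algorithm.

    # Sketch: exact Mean Value Analysis for a closed network with N customers
    # and K queueing stations. Service demands are illustrative.
    def mva(demands, N):
        """demands: per-station service demand D_k; N: customer population.
        Returns (throughput, per-station mean queue lengths) at population N."""
        K = len(demands)
        q = [0.0] * K                       # queue lengths at population n-1
        for n in range(1, N + 1):
            # residence time: D_k * (1 + queue seen on arrival)
            r = [demands[k] * (1.0 + q[k]) for k in range(K)]
            X = n / sum(r)                  # throughput via Little's law
            q = [X * r[k] for k in range(K)]
        return X, q

    X, q = mva([0.05, 0.02, 0.08], N=16)    # e.g. CPU, bus, memory demands
    print(X, q)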
@Article{Gupta:1991:PAT,
author = "Anurag Gupta and Ian Akyildiz and Richard M.
Fujimoto",
title = "Performance analysis of {Time Warp} with homogeneous
processors and exponential task times",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "101--110",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107983",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The behavior of $n$ interacting processors
synchronized by the `Time Warp' protocol is analyzed
using a discrete state continuous time Markov chain
model. The performance and dynamics of the processes
are analyzed under the following assumptions:
exponential task times and timestamp increments on
messages, each event message generates one new message
that is sent to a randomly selected process, negligible
rollback, state saving, and communication delay,
unbounded message buffers, and homogeneous processors
that are never idle. We determine the fraction of
processed events that commit, speedup, rollback
probability, expected length of rollback, the
probability mass function for the number of uncommitted
processed events, and the probability distribution
function for the virtual time of a process. The
analysis is approximate, so the results have been
validated through performance measurements of a Time
Warp testbed (PHOLD workload model) executing on a
shared memory multiprocessor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:1991:SDH,
author = "Jong Kim and Chita R. Das",
title = "On subcube dependability in a hypercube",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "111--119",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107984",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present an analytical model for
computing the dependability of hypercube systems. The
model, referred to as task-based dependability (TBD),
is developed under the assumption that a task needs at
least an $m$-cube ($m$) ????",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:1991:IOS,
author = "Anoop Gupta and Andrew Tucker and Shigeru Urushibara",
title = "The impact of operating system scheduling policies and
synchronization methods on the performance of parallel
applications",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "120--132",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107985",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Shared-memory multiprocessors are frequently used as
compute servers with multiple parallel applications
executing at the same time. In such environments, the
efficiency of a parallel application can be
significantly affected by the operating system
scheduling policy. In this paper, we use detailed
simulation studies to evaluate the performance of
several different scheduling strategies. These include
regular priority scheduling, coscheduling or gang
scheduling, process control with processor
partitioning, handoff scheduling, and affinity-based
scheduling. We also explore tradeoffs between the use
of busy-waiting and blocking synchronization primitives
and their interactions with the scheduling strategies.
Since effective use of caches is essential to achieving
high performance, a key focus is on the impact of the
scheduling strategies on the caching behavior of the
applications. Our results show that in situations where
the number of processes exceeds the number of
processors, regular priority-based scheduling in
conjunction with busy-waiting synchronization
primitives results in extremely poor processor
utilization. In such situations, use of blocking
synchronization primitives can significantly improve
performance. Process control and gang scheduling
strategies are shown to offer the highest performance,
and their performance is relatively independent of the
synchronization method used. However, for applications
that have sizable working sets that fit into the cache,
process control performs better than gang scheduling.
For the applications considered, the performance gains
due to handoff scheduling and processor affinity are
shown to be small.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:1991:PPB,
author = "Songnian Zhou and Timothy Brecht",
title = "Processor-pool-based scheduling for large-scale {NUMA}
multiprocessors",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "133--142",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107986",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Large-scale Non-Uniform Memory Access (NUMA)
multiprocessors are gaining increased attention due to
their potential for achieving high performance through
the replication of relatively simple components.
Because of the complexity of such systems, scheduling
algorithms for parallel applications are crucial in
realizing the performance potential of these systems.
In particular, scheduling methods must consider the
scale of the system, with the increased likelihood of
creating bottlenecks, along with the NUMA
characteristics of the system, and the benefits to be
gained by placing threads close to their code and data.
We propose a class of scheduling algorithms based on
{\em processor pools}. A processor pool is a software
construct for organizing and managing a large number of
processors by dividing them into groups called pools.
The parallel threads of a job are run in a single
processor pool, unless there are performance advantages
for a job to span multiple pools. Several jobs may
share one pool. Our simulation experiments show that
processor pool-based scheduling may effectively reduce
the average job response time. The performance
improvements attained by using processor pools increase
with the average parallelism of the jobs, the load
level of the system, the differentials in memory access
costs, and the likelihood of having system bottlenecks.
As the system size increases, while maintaining the
workload composition and intensity, we observed that
processor pools can be used to provide significant
performance improvements. We therefore conclude that
processor pool-based scheduling may be an effective and
efficient technique for scalable systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:1991:ATM,
author = "Mark S. Squillante and Randolph D. Nelson",
title = "Analysis of task migration in shared-memory
multiprocessor scheduling",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "143--155",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107987",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In shared-memory multiprocessor systems it may be more
efficient to schedule a task on one processor than on
another. Due to the inevitability of idle processors in
these environments, there exists an important tradeoff
between keeping the workload balanced and scheduling
tasks where they run most efficiently. The purpose of
an adaptive task migration policy is to determine the
appropriate balance between the extremes of this load
sharing tradeoff. We make the observation that there
are considerable differences between this load sharing
problem in distributed and shared-memory multiprocessor
systems, and we formulate a queueing theoretic model of
task migration to study the problem. A detailed
mathematical analysis of the model is developed, which
includes the effects of increased contention for system
resources induced by the task migration policy. Our
objective is to provide a better understanding of task
migration in shared-memory multiprocessor environments.
In particular, we illustrate the potential for
significant improvements in system performance, and we
show that even when migration costs are large it may
still be beneficial to migrate waiting tasks to idle
processors. We further demonstrate the potential for
unstable behavior under migratory scheduling policies,
and we provide optimal policy thresholds that yield the
best performance and avoid this form of processor
thrashing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dan:1991:AMH,
author = "Asit Dan and Daniel M. Dias and Philip S. Yu",
title = "Analytical modelling of a hierarchical buffer for a
data sharing environment",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "156--167",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107988",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a data sharing environment, where a number of
loosely coupled computing nodes share a common storage
subsystem, the effectiveness of a private buffer at
each node is limited due to the multi-system
invalidation effect, particularly under a non-uniform
data access pattern. A global shared buffer can be
introduced to alleviate this problem either as a disk
cache or shared memory. In this paper we developed an
approximate analytic model to evaluate different shared
buffer management policies (SBMPs) which differ in
their choice of data granules to be put into the shared
buffer. The analytic model can be used to study the
trade-offs of different SBMPs and the impact of
different buffer allocations between shared and private
buffers. The effects of various parameters, such as
the probability of update, the number of nodes, the
sizes of private and shared buffer, etc., on the
performance of SBMPs are captured in the analytic
model. A detailed simulation model is also developed to
validate the analytic model. We show that dependency
between the contents of the private and shared buffers
can play an important role in determining the
effectiveness of the shared buffer particularly for a
small number of nodes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reiman:1991:PAC,
author = "Martin Reiman and Paul E. Wright",
title = "Performance analysis of concurrent-read
exclusive-write",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "168--177",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107989",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the concurrent-read exclusive-write
protocol for access to a shared resource, such as
occurs in database and distributed operating systems.
Readers arrive according to a Poisson process and
acquire shareable, i.e., non-exclusive, locks which,
once granted, are released after a generally
distributed random period. Writers arrive according to
an arbitrary renewal process and acquire exclusive
locks which, once granted, are held for a random time
which is also generally distributed. Locks are granted
in the order in which requests are received. We derive
necessary and sufficient conditions under which the
queue is stable, i.e., the latencies for reader/writer
lock acquisition have a limiting distribution. In the
unstable case, the delays of successive readers/writers
become unbounded. The stability condition is sensitive
to the interarrival-time distribution of the writers
and the lock holding-time distribution of the readers
but depends only on the mean lock holding-time of the
writers. Distributional and moment bounds are given for
the latencies of read/write requests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{French:1991:PMP,
author = "James C. French and Terrence W. Pratt and Mriganka
Das",
title = "Performance measurement of a parallel input\slash
output system for the {Intel iPSC\slash 2 Hypercube}",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "178--187",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107990",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Intel Concurrent File System (CFS) for the iPSC/2
hypercube is one of the first production file systems
to utilize the declustering of large files across
numbers of disks to improve I/O performance. The CFS
also makes use of dedicated I/O nodes, operating
asynchronously, which provide file caching and
prefetching. Processing of I/O requests is distributed
between the compute node that initiates the request and
the I/O nodes that service the request. The effects of
the various design decisions in the Intel CFS are
difficult to determine without measurements of an
actual system. We present performance measurements of
the CFS for a hypercube with 32 compute nodes and four
I/O nodes (four disks). Measurements of read/write rates
for one compute node to one I/O node, one compute node
to multiple I/O nodes, and multiple compute nodes to
multiple I/O nodes form the basis for the study.
Additional measurements show the effects of different
buffer sizes, caching, prefetching, and file
preallocation on system performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chervenak:1991:PDA,
author = "Ann L. Chervenak and Randy H. Katz",
title = "Performance of a disk array prototype",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "188--197",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107991",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The RAID group at U.C. Berkeley recently built a
prototype disk array. This paper examines the
performance limits of each component of the array using
SCSI bus traces, Sprite operating system traces and
user programs. The array performs successfully for a
workload of small, random I/O operations, achieving 275
I/Os per second on 14 disks before the Sun4/280 host
becomes CPU-limited. The prototype is less successful
in delivering high throughput for large, sequential
operations. Memory system contention on the Sun4/280
host limits throughput to 2.3 MBytes/sec under the
Sprite Operating System. Throughput is also limited by
the bandwidth supported by the VME backplane, disk
controller and disks, and overheads associated with the
SCSI protocol. We conclude that merely using a powerful
host CPU and many disks will not provide the full
bandwidth possible from disk arrays. Host memory
bandwidth and throughput of disk controllers are
equally important. In addition, operating systems
should avoid unnecessary copy and cache flush
operations that can saturate the host memory system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1991:PMD,
author = "Shenze Chen and Don Towsley",
title = "Performance of a mirrored disk in a real-time
transaction system",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "198--207",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107992",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Disk mirroring has found widespread use in computer
systems as a method for providing fault tolerance. In
addition to increasing reliability, a mirrored disk can
also reduce I/O response time by supporting the
execution of parallel I/O requests. The improvement in
I/O efficiency is extremely important in a real-time
system, where each computational entity carries a
deadline. In this paper, we present two classes of
real-time disk scheduling policies, RT-DMQ and RT-CMQ,
for a mirrored disk I/O subsystem and examine their
performance in an integrated real-time transaction
system. The real-time transaction system model is
validated on a real-time database testbed, called
RT-CARAT. The performance results show that a mirrored
disk I/O subsystem can decrease the fraction of
transactions that miss their deadlines over a single
disk system by 68\%. Our results also reveal the
importance of real-time scheduling policies, which can
lead up to a 17\% performance improvement over
non-real-time policies in terms of minimizing the
transaction loss ratio.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glenn:1991:IMP,
author = "R. R. Glenn and D. V. Pryor",
title = "Instrumentation for a massively parallel {MIMD}
application",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "208--209",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107993",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an application implemented on a
simulated machine called Horizon. One purpose of this
study is to investigate some of the features of a
possible future machine (or class of machines) with a
view toward deciding, early on in the research cycle,
where problems may come up, what features should be
added or strengthened, and what proposed features seem
to be unnecessary. Another purpose is to learn more
about how to program, instrument and debug a shared
memory, massively parallel MIMD computer, and to begin
to answer some of the questions: What tools does a
programmer need to debug this type of machine? How can
a programmer know if the machine is performing well?
How can bottlenecks be identified? How can the massive
amount of instrumentation information be condensed and
presented to a user in a way that makes sense?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goldberg:1991:MMD,
author = "Aaron Goldberg and John Hennessy",
title = "{MTOOL}: a method for detecting memory bottlenecks",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "210--211",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107994",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a new, relatively inexpensive
method for detecting regions (e.g. loops and
procedures) in a program where the memory hierarchy is
performing poorly. By observing where actual measured
execution time differs from the time predicted given a
perfect memory system, we can isolate memory
bottlenecks. MTOOL, an implementation of the approach
aimed at application programs running on MIPS-chip
based workstations, is described and results for some of
the Perfect Club and SPEC benchmarks are summarized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
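The detection idea above, flagging regions whose measured time exceeds the time predicted for a perfect memory system, amounts to a simple per-region comparison. The sketch below shows only that arithmetic; the region names and numbers are hypothetical, and this is not MTOOL itself.

    # Sketch: flag regions whose measured time exceeds the perfect-memory
    # prediction. Region data is invented.
    regions = {
        # region: (measured_seconds, predicted_seconds_with_perfect_memory)
        "loop_a":   (4.8, 1.9),
        "loop_b":   (2.1, 2.0),
        "proc_fft": (6.0, 5.7),
    }

    def memory_bottlenecks(regions, threshold=1.5):
        """Regions whose measured/predicted ratio exceeds the threshold,
        sorted by absolute time attributed to the memory hierarchy."""
        lost = {name: meas - pred
                for name, (meas, pred) in regions.items()
                if meas / pred > threshold}
        return sorted(lost.items(), key=lambda kv: kv[1], reverse=True)

    print(memory_bottlenecks(regions))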
@Article{Kim:1991:ISS,
author = "Yul H. Kim and Mark D. Hill and David A. Wood",
title = "Implementing stack simulation for highly-associative
memories",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "212--213",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107995",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Prior to this work, all implementations of stack
simulation [MGS70] required more than linear time to
process an address trace. In particular these
implementations are often slow for highly-associative
memories and traces with poor locality, as can be found
in simulations of file systems. We describe a new
implementation of stack simulation where the referenced
block and its stack distance are found using a hash
table rather than by traversing the stack. The key to
this implementation is that designers are rarely
interested in a continuum of memory sizes, but instead
desire metrics for only a discrete set of alternatives
(e.g., powers of two). Our experimental evaluation
shows the run-time of the new implementation to be
linear in address trace length and independent of trace
locality. Kim, et al., [KHW91] present the results of
this research in more detail.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
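The observation reported above, that designers usually need metrics only for a discrete set of memory sizes, can be illustrated with a much simpler scheme than the hash-table stack simulation of [KHW91]: simulate one fully-associative LRU cache per candidate size in a single pass over the trace. The sketch below demonstrates only that idea, on a made-up trace; it is not the paper's algorithm.

    # Sketch: per-size LRU hit counts in one pass over an address trace, for a
    # discrete set of cache sizes (in blocks). Illustrative only.
    from collections import OrderedDict

    def lru_hits(trace, sizes):
        caches = {s: OrderedDict() for s in sizes}   # block -> None, LRU order
        hits = {s: 0 for s in sizes}
        for block in trace:
            for s in sizes:
                c = caches[s]
                if block in c:
                    hits[s] += 1
                    c.move_to_end(block)             # mark most recently used
                else:
                    c[block] = None
                    if len(c) > s:
                        c.popitem(last=False)        # evict least recently used
        return hits

    trace = [1, 2, 3, 1, 2, 4, 1, 5, 2, 1, 3, 4]     # invented block addresses
    print(lru_hits(trace, sizes=[2, 4, 8]))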
@Article{Newman:1991:PAC,
author = "Robb Newman",
title = "Performance analysis case study (abstract):
application of experimental design \& statistical data
analysis techniques",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "214--215",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107996",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A common requirement of computer vendor's competitive
performance analysis departments is to measure and
report on the performance characteristics of another
vendor's system. In many cases the amount of prior
knowledge concerning the competitor's system is limited
to sales brochures and non-technical publications.
Availability of the system for benchmarking is minimal;
there is little choice concerning memory and I/O
configurations; and time to complete the project is
short. A project of this nature is not, however, unique
to computer vendors. Many users of computer systems
that want to better understand a system's performance
characteristics before deciding on a purchase are also
faced with similar restrictions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Park:1991:MPB,
author = "Arvin Park and Jeffrey C. Becker",
title = "Measurements of the paging behavior of {UNIX}",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "216--217",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107997",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper analyzes measurements of paging activity
from several different versions of UNIX. We set out to
characterize paging activity by first taking
measurements of it, and then writing programs to
analyze it. In doing so, we were interested in
answering several questions:\par
1. What is the magnitude of paging traffic and how much
of I/O system activity is paging related?\par
2. What are the characteristics of paging activity, and
how can paging system implementations be tuned to match
them?\par
3. How does paging activity vary across different
machines, operating systems, and job mixes?\par
4. How well does paging activity correlate with system
load average and number of users?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pasquale:1991:SDW,
author = "Joseph Pasquale and Barbara Bittel and Daniel
Kraiman",
title = "A static and dynamic workload characterization study
of the {San Diego Supercomputer Center Cray X-MP}",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "218--219",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107998",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The San Diego Supercomputer Center is one of four NSF
sponsored national supercomputer centers. Up until
January of 1990, its workhorse was a Cray X-MP, which
served 2700 researchers from 170 institutions, spanning
44 states. In order to better understand how this
supercomputer was utilized by its diverse community of
users, we undertook a workload characterization study
of the Cray X-MP. The goals of our study were twofold.
First, we wished to characterize the workload at both
the functional and resource levels. The functional
level represents the user point of view: what types of
programs users are running on the system. The resource
level represents the system point of view: how the
systems resources (CPU, memory, I/O bandwidth) are
being used. Second, we wanted to see how the workload
changed over an average weekday. Thus, we conducted a
static characterization to understand its global
attributes over the entire measurement period, as well
as a dynamic workload characterization to understand
the time behavior of the workload over a weekday
cycle.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pu:1991:EMA,
author = "Calton Pu and Frederick Korz and Robert C. Lehman",
title = "An experiment on measuring application performance
over the {Internet}",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "220--221",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107999",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of wide area networks (WANs) such as the
Internet is growing at a tremendous rate. Such networks
hold great promise for new types of distributed
applications, which will be widely distributed, highly
replicated, intensely interactive, and adaptive to many
types of network conditions. Developing such
applications will require a solid understanding of the
performance and availability characteristics of WANs as
they evolve. The ability to measure the effect of these
conditions will, for example, be important for
large-volume applications such as digital libraries,
and for near-real-time applications such as
collaborative research and teleconferencing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:1991:PBB,
author = "Myung K. Yang and Chita R. Das",
title = "A parallel branch-and-bound algorithm for {MIN}-based
multiprocessors",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "222--223",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.108000",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A parallel `Decomposite Best-First' search
Branch-and-Bound algorithm ({\em pdbsbb\/}) for
MIN-based multiprocessor systems is proposed in this
paper. A conflict free mapping scheme, known as {\em
step-by-step spread}, is used to map the algorithm
efficiently on to a MIN-based system for reducing
communication overhead. It is shown that the proposed
algorithm provides better speed-up than other reported
schemes when communication overhead is taken into
consideration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Epema:1991:BRC,
author = "Dick H. J. Epema",
title = "Book Review: {`Computer and Communication Systems
Performance Modelling' by Peter J. B. King (Prentice
Hall, 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "4--5",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045494",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book offers a simple and short introduction to
the theory of queueing models of computer and
communication systems. It consists of 14 chapters.
After the first, which gives the motivation and a
feeling for the subject (among other things, by an
informal proof and some simple illustrations of
Little's theorem), there are two preparatory chapters
on probability theory and stochastic processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
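The informal treatment of Little's theorem mentioned in this review refers to the standard relation between mean population, arrival rate, and mean time in system; in the usual textbook notation (not quoted from the book itself):

\[ L = \lambda W . \]

For example, if jobs arrive at $\lambda = 10$ per second and spend $W = 0.5$ seconds in the system on average, the mean number in the system is $L = 10 \times 0.5 = 5$.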
@Article{Al-Jaar:1991:BRA,
author = "Robert Y. Al-Jaar",
title = "Book review: {`The Art of Computer Systems Performance
Analysis: Techniques for Experimental Design,
Measurement, Simulation, and Modeling' by Raj Jain
(John Wiley \& Sons 1991)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "5--11",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045495",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the preface to {\em The Art of Computer Systems
Performance Analysis: Techniques for Experimental
Design, Measurement, Simulation, and Modeling}, Raj
Jain discusses the intended audience and the goals of
the book, which are to: $ \bullet $ Provide computer
professionals with simple and straightforward performance
analysis techniques in a comprehensive textbook. $
\bullet $ Give basic modeling, simulation, measurement,
experimental design, and statistical analysis
background. $ \bullet $ Emphasize and integrate the
modeling and measurement aspects of performance
analysis. $ \bullet $ Discuss common mistakes and games
in performance analysis studies. $ \bullet $ Illustrate
the presented techniques using examples and case
studies from the field of computer systems. $ \bullet $
Summarize key techniques and results in `boxes'. $
\bullet $ Organize chapters in 45-minute lectures and
include appropriate exercises.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRPd,
author = "David Finkel",
title = "Brief review: {`Probability, Statistics and Queueing
Theory with Computer Science Applications,' Second
Edition by Arnold O. Allen (Academic Press 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "11--12",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045496",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is a revision of the classic probability and
statistics text originally written in 1978. Like the
first edition, this book is designed for an upper-level
undergraduate course in probability and statistics with
computer science applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRC,
author = "David Finkel",
title = "Brief review: {`Computer Networks \& Systems: Queueing
Theory and Performance Evaluation' by Thomas Robertazzi
(Springer-Verlag, 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "12--12",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045498",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is the proceedings of the Workshop on
Parallel Computer Systems: Performance Instrumentation
and Visualization held in Santa Fe, New Mexico in May,
1989. Some of the sixteen papers included here discuss
research projects designed primarily to collect
performance data from distributed and parallel systems.
Other papers discuss modern visualization techniques in
general, or report on projects to put these powerful
techniques to work on parallel computer system
performance data, to make this data easier to
understand and to use to improve system or program
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRQ,
author = "David Finkel",
title = "Brief review: {``Queueing Networks --- Exact
Computational Algorithms: A Unified Theory Based on
Decomposition and Aggregation'' by Adrian E. Conway and
Nicholas D. Georganas (MIT Press 1989)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "12--12",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045497",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Unlike the other, more specialized, books given brief
reviews in this issue, this book would be an
appropriate text for an introductory graduate course in
performance evaluation. The book presumes a knowledge
of probability theory, which is reviewed in an
appendix. There is a chapter on single queueing
systems, which covers the M/M/1 queueing system in
detail, and a number of related models. In particular,
the author has a section on reversibility and one on
the M/G/1 queue.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
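As background for the M/M/1 material mentioned in this review, the standard textbook results (standard notation, not quoted from the book) are: with arrival rate $\lambda$, service rate $\mu$, and utilization $\rho = \lambda / \mu < 1$,

\[ L = \frac{\rho}{1 - \rho}, \qquad W = \frac{1}{\mu - \lambda}, \qquad L = \lambda W . \]

For instance, $\lambda = 8$ and $\mu = 10$ give $\rho = 0.8$, $L = 4$ customers in the system, and a mean response time of $W = 0.5$ time units.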
@Article{Finkel:1991:BRPe,
author = "David Finkel",
title = "Brief review: {`Performance Instrumentation \&
Visualization' by Margaret Simmons and Rebecca Koskela
(Addison-Wesley \& ACM Press, 1989)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "12--13",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045499",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book presents a thorough discussion of exact
algorithms for product-form queueing networks. The
authors discuss the well-known Convolution Algorithm,
and Mean Value Analysis (MVA), as well as some more
recent algorithms: Recursion by Chain (RECAL), Mean
Value Analysis by Chain (MVAC), and the Distribution
Analysis by Chain (DAC).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
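Of the algorithms listed in this abstract, Mean Value Analysis is the simplest to state. As background only, here is the textbook exact MVA recursion for a single-class closed product-form network in Python; the service demands, think time, and population in the example are made up, and the multi-chain algorithms (Convolution, RECAL, MVAC, DAC) treated in the reviewed book are substantially more involved.

    # Textbook exact MVA for a single-class closed product-form network with
    # N customers, queueing centers with service demands d[k], and think time Z.
    def mva(demands, N, Z=0.0):
        K = len(demands)
        Q = [0.0] * K                # mean queue length at each center
        X, R = 0.0, list(demands)
        for n in range(1, N + 1):
            R = [demands[k] * (1.0 + Q[k]) for k in range(K)]   # residence times
            X = n / (Z + sum(R))                                # system throughput
            Q = [X * R[k] for k in range(K)]                    # Little's law per center
        return X, R, Q

    # Example: two centers with demands 0.05 s and 0.03 s, 20 customers, 1 s think time.
    X, R, Q = mva([0.05, 0.03], N=20, Z=1.0)
    print(f"throughput {X:.2f}/s, response time {1000 * sum(R):.1f} ms")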
@Article{Finkel:1991:BRS,
author = "David Finkel",
title = "Brief review: {`Stochastic Analysis of Computer and
Communication Systems', Ed. by H. Takagi (Elsevier
Science Publishers B.V. 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "13--13",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045500",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is the first volume in a series of books,
designed to give an introduction to research-level
topics in queueing theory applicable to performance
evaluation. As such, it presumes as background a
careful mathematical study of introductory queueing
theory topics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Frankel:1991:BRQ,
author = "David Frankel",
title = "Brief review: {`Queueing Analysis: A Foundation of
Performance Evaluation. Volume 1: Vacation and Priority
Systems, Part 1' by H. Takagi (North-Holland, 1991)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "13--13",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045501",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is a collection of articles, written especially
for this publication, designed to show the rich variety
of stochastic models applicable to studying the
performance of computer and communications systems.
There are a total of twenty articles, divided into four
sections. The first section, Stochastic Processes,
includes articles presenting general stochastic
process models applied to computer and communications
system modeling. The second section, Queues, presents
queueing theoretic models which are applicable to
performance modeling, although these articles
concentrate on the queueing models themselves. The
final two sections, Computer Systems and Communication
Systems, present applications of analytic modeling to
these kinds of systems. The final article is an
extensive bibliography compiled by Dr. Takagi of works
on performance evaluation. There are separate sections
for books, special issues of journals, conference
proceedings, and survey and tutorial articles.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ames:1991:CTP,
author = "D. Ames and D. Gibson and B. Troy",
title = "Composite theoretical performance",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "24--29",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Export controls require that computer systems,
specifically digital central processing units (CPUs), be
characterized as to performance. Absolute performance
measurement is not required; rather, a very wide range
of CPUs, from micros to supercomputers, must be
rank-ordered. Ranking is based on a synthetic
characterization and is influenced by the design
details of the particular processor that make it useful
for one or more strategic applications. This paper
describes the strategic export control concerns, the
rationale involved in the choice of a metric, the
technical considerations, and the elements included in
the CTP metric.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Christianson:1991:ALE,
author = "Bruce Christianson",
title = "{Amdahl's Law} and end of system design",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "30--32",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122566",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gene Amdahl has persuasively argued that there are
severe technology-independent limits on the performance
gains which can be achieved by using massively parallel
processing. This conclusion (popularly called {\em
Amdahl's Law\/}) has been supported by a number of
different arguments [1], advanced in the context of
vector processing and also in the context of the
hypercube architecture.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
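The limit the abstract alludes to is usually written as follows (the standard statement of Amdahl's Law, not a formula taken from the paper): if a fraction $p$ of the work parallelizes perfectly over $n$ processors,

\[ S(n) = \frac{1}{(1 - p) + p / n}, \qquad S(n) < \frac{1}{1 - p} . \]

Even with $p = 0.95$, the speedup never exceeds $1 / 0.05 = 20$ regardless of the number of processors, which is the kind of technology-independent limit the paper discusses.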
@Article{Finkel:1991:OWB,
author = "David Finkel and Robert E. Kinicki and Jonas A.
Lehmann",
title = "An overview of the {WPI Benchmark Suite}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "33--35",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The November 1990 issue of Performance Evaluation
Review included a number of articles and opinions on
the merits of commercial benchmark suites. In the
spirit of continuing this discussion, we present here a
brief introduction to the WPI Benchmark Suite.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Becker:1991:APB,
author = "Jeffrey C. Becker and Arvin Park",
title = "Analysis of the paging behavior of {UNIX}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "36--43",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the paging behavior of several different
versions of UNIX by recording traces of paging activity
over time and writing programs to analyze the traces.
We recorded periodic totals of paging events instead of
individual paging events themselves. Our analysis shows
that paging activity accounts for between 15\% and 21\%
of all disk block accesses. Average paging system
traffic is very low. The paging system is idle most of
the time and paging activity occurs in large periodic
bursts. Despite the fact that it is often overlooked,
swap-related paging accounts for a significant portion
of all paging activity (between 24\% and 71\%).
Furthermore, the behavior of swap-related paging
differs greatly from the well-studied behavior of
demand paging. The ratio of pages read to pages written
(which varies between 0.85 and 1.9) is lower than
typical read to write ratios for file system accesses.
Paging activity is loosely correlated with load average
or number of users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
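The analysis described above reduces periodic totals of paging events to a few summary statistics. The sketch below shows one way such a reduction could look in Python; the CSV column names, the one-block-per-page assumption, and the file name are invented for illustration and are not the authors' trace format or analysis programs.

    # Sketch of the kind of trace reduction described above (hypothetical format).
    import csv

    def summarize(trace_file):
        """Reduce periodic paging totals to a few summary statistics."""
        pages_in = pages_out = disk_blocks = 0
        busy_intervals = total_intervals = 0
        with open(trace_file) as f:
            for row in csv.DictReader(f):
                pi, po = int(row["pages_in"]), int(row["pages_out"])
                pages_in += pi
                pages_out += po
                disk_blocks += int(row["disk_blocks"])
                total_intervals += 1
                if pi + po:
                    busy_intervals += 1
        paging_blocks = pages_in + pages_out            # assumes one disk block per page
        return {
            "pages read / pages written": pages_in / max(pages_out, 1),
            "paging share of disk accesses": paging_blocks / max(disk_blocks, 1),
            "fraction of intervals with any paging": busy_intervals / max(total_intervals, 1),
        }

    print(summarize("paging_totals.csv"))    # hypothetical file of periodic totals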
@Article{Fateyev:1991:CEA,
author = "A. E. Fateyev and S. M. Porotskiy and V. I. Drujinin",
title = "Comparative evaluation of approximate methods for
modelling of network systems",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "44--48",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The paper presents the results of a comparative
evaluation of several approximate methods of queueing
network analysis, concerning their accuracy, domains of
validity, and computational cost; the comparison is
carried out over varying values of the network
parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nangia:1992:BRP,
author = "Ashvini Nangia",
title = "Book Review: {`Performance Analysis of Transaction
Processing Systems' by Wilbur H. Highleyman (Prentice
Hall, 1989)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "9--11",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045110",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book deals with issues related to performance
analysis of a special class of real-time computing
systems called transaction processing systems. Even
though the book primarily discusses OLTP (On-line
Transaction Processing) architectures, it provides an
excellent text for performance evaluation of operating
systems and file systems. In many cases the author
discusses the effect of multiple processors on
performance of the overall system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meng:1992:BRC,
author = "Xiannong Meng",
title = "Book Review: {`Computer Networks and Systems: Queueing
Theory and Performance Evaluation' by Thomas G.
Robertazzi (Springer-Verlag, 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "11--12",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045111",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is a book intended for first-year graduate level
courses in statistical performance evaluation. The book
can be used for both network performance and computer
system performance courses although the emphasis is on
computer networks. It assumes a background in computer
networks (first graduate course). Readers should have
a solid mathematics background if they use this book as
self-study material. The book does provide a very brief
review of probability theory, but it is not detailed
enough for readers who have not studied probability
before.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1992:BRS,
author = "?. Finkel",
title = "Brief review: {`Stochastic Modeling and the Theory of
Queues' by Ronald W. Wolff (Prentice-Hall, 1989)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "12--12",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045490",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is intended for a first-year graduate course
in stochastic processes, and queueing theory. It is
mathematically rigorous, and requires a substantial
background in probability theory. The first chapter
provides a review of the necessary topics from
probability theory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1992:BRMa,
author = "?. Finkel",
title = "Brief review: {`Markovian Queues' by O. P. Sharma
(Ellis Horwood Publishers 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "12--13",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045491",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This slim monograph presents a novel approach to
understanding the behavior of the M/M/1 queue and of
other Markovian queues with finite capacity. The basic
idea is to construct a two-dimensional model of the
queueing system, where the two dimensions represent the
number of customers who have arrived to the system, and
the number of customers who have departed. A
closed-form solution is then obtained for this model,
from which various performance measures of interest can
be derived. The author also presents transient analysis
of certain Markovian queues based on this same
approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1992:BRB,
author = "?. Finkel",
title = "Brief review: {`The Benchmark Handbook: Database and
Transaction Processing Systems,' Ed. by Jim Gray
(Morgan Kaufmann Publishers, Inc., 1991)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "13--13",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045493",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is unique in the performance literature, and
provides a valuable service to those interested in
benchmarking database and transaction processing
systems, or who are interested in benchmarking in
general. The Introduction, written by the editor,
explains the structure of the book and discusses
benchmarking in general: the need for benchmarks,
design criteria for benchmarks, and an overview of the
benchmarks presented in the book.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1992:BRMb,
author = "?. Finkel",
title = "Brief review: {``Modeling and Analysis of Local Area
Networks'' by Paul J. Fortier and George Desrochers
(CRC Press, 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "13--13",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045492",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "According to the author, this book is intended for
network researchers, users, designers and evaluators,
to enable them to make informed decisions about network
design and configuration. Except for the lack of
exercises, this book could also be used as a textbook
in this area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berry:1992:SWC,
author = "Michael W. Berry",
title = "Scientific workload characterization by loop-based
analyses",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "17--29",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.130952",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of scientific and engineering benchmarks have
emerged during the 1980s. Each of these benchmarks has
a different origin, methodology and interpretation.
This report presents a case study of two current
scientific benchmarks and includes a comparison of them
based on their instruction mixes as measured by the
CRAY X-MP {\em hardware performance monitor\/} (hpm).
This particular case study was conducted by graduate
students in a Performance Evaluation course taught
during Spring Quarter 1991 in the Department of
Computer and Information Sciences at the University of
Alabama at Birmingham. Students analyzed the dominant
loops of the application-based Perfect Benchmarks and
noted (where applicable) significant performance
comparisons with the loop-based Livermore Fortran
Kernels. Whether or not any collection of kernel or
loop-based benchmarks can effectively predict the
performance of more sophisticated scientific
application programs is not clear. This case study does
reveal, however, the types of loops which are most
prevalent in codes from various scientific applications
and what their impact is on the overall performance of
these applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Council:1992:CTR,
author = "Corporate Transaction Processing Performance Council",
title = "Complete {TPC} results (as of 9/30/91)",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "32--35",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.130953",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Deike-Glindemann:1992:SPE,
author = "Hartmut Deike-Glindemann",
title = "{SIQUEUE-PET}: an environment for queueing network
modelling",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "36--44",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.130954",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Developing models for performance evaluation of
computer systems, logistic systems etc. often is a
complex task. The effort can be considerably reduced if
appropriate software tools are available. In many cases
queueing network models are suitable for solving the
problem to a sufficient degree of accuracy. SIQUEUE-PET
provides an environment for construction, evaluation
and result representation of such models. The user is
assisted through a graphical interface for model
construction as well as for result representation. The
availability of object-management support further eases
the modelling activities. This contribution gives a
brief overview of
the main features of SIQUEUE-PET. From the viewpoint of
modelling style, the availability of aggregation
techniques and the capability of processing
hierarchically structured models are to be emphasized.
An example is included for illustrative purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dujmovic:1992:UMS,
author = "Jozo J. Dujmovi{\'c}",
title = "The use of multiple-subscripted arrays in benchmark
programs",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "45--48",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.130955",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study the effects of using
multiple-subscripted arrays in benchmark programs. We
identify and exemplify typical problems caused by
multiple-subscripted arrays and show why their usage in
benchmarking should be strictly controlled and
frequently restricted. Multiple-subscripted arrays can
be considered harmful in the case of general purpose
processor-bound benchmarks. On the other hand,
multiple-subscripted arrays are shown to be suitable
for measuring the optimizing features of compilers,
especially for RISC machines.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pooley:1992:BRC,
author = "Rob Pooley",
title = "Book Reviews: {`Computer and Communication Systems
Performance Modelling' by Peter J. B. King (Prentice
Hall 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "4",
pages = "13--14",
month = may,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/140728.1044850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book offers a simple and short introduction to
the theory of queueing models of computer and
communication systems. It consists of 14 chapters.
After the first, which gives the motivation and a
feeling for the subject (among other things, by an
informal proof and some simple illustrations of
Little's theorem), there are two preparatory chapters
on probability theory and stochastic processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1992:MDF,
author = "Anna Hac",
title = "Modeling distributed file systems",
journal = j-SIGMETRICS,
volume = "19",
number = "4",
pages = "22--27",
month = may,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/140728.140729",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes different methods and techniques
used to model, analyze, evaluate and implement
distributed file systems. Distributed file systems are
characterized by the distributed system hardware and
software architecture in which they are implemented, as
well as by the file systems' functions. In addition,
distributed file system performance depends on the load
executed in the system. Modeling and analysis of
distributed file systems requires new methods to
approximate the complexity of the system and to provide a
useful solution. The complexity of the distributed file
system is reflected in the possible placement of the
files, file replication, and migration of files and
processes. The synchronization mechanisms are needed to
control file access. File sharing involves load sharing
in a distributed environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Molloy:1992:ANB,
author = "Michael K. Molloy",
title = "Anatomy of the {NHFSSTONES} benchmarks",
journal = j-SIGMETRICS,
volume = "19",
number = "4",
pages = "28--39",
month = may,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/140728.140731",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is intended to provide some insight into
the NHFSSTONES benchmark operations and how one may
interpret the results. This white paper covers the
reasons for the benchmarks, the basics of their
operation, the differences between the original
benchmark and its descendants, and finally some
instructions on how to run the benchmark.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keown:1992:RTP,
author = "William F. {Keown, Jr.} and Philip {Koopman, Jr.} and
Aaron Collins",
title = "Real-time performance of the {HARRIS RTX 2000} stack
architecture versus the {Sun 4 SPARC} and the {Sun 3
M68020} architectures with a proposed real-time
performance benchmark",
journal = j-SIGMETRICS,
volume = "19",
number = "4",
pages = "40--48",
month = may,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/140728.140733",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This study compares a stack machine, the Harris RTX
2000, a RISC machine, the Sun 4/SPARC, and a CISC
machine, the Sun3/M68020 for real-time applications. An
attempt is made to compare the generic features of each
machine which are characteristic of their architectural
classes as opposed to being characteristic of the
individual machine only. Performance is compared based
on execution of the Stanford Integer Benchmark series
and on interrupt response characteristics. A simple
Real-Time Performance BenchMark which integrates raw
compute power and interrupt response is proposed, then
used to estimate the real-time performance of the
machines. It is shown that the RTX 2000 outperforms the
others for applications which have a very large number
of interrupts per second, confirming that stack
architectures should perform well in real-time
applications such as high-speed computer communication
systems. For less interrupt-intensive applications, the
Sun 4 SPARC performs better.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Martonosi:1992:MAM,
author = "Margaret Martonosi and Anoop Gupta and Thomas
Anderson",
title = "{MemSpy}: analyzing memory system bottlenecks in
programs",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "1--12",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133079",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To cope with the increasing difference between
processor and main memory speeds, modern computer
systems use deep memory hierarchies. In the presence of
such hierarchies, the performance attained by an
application is largely determined by its memory
reference behavior --- if most references hit in the
cache, the performance is significantly higher than if
most references have to go to main memory. Frequently,
it is possible for the programmer to restructure the
data or code to achieve better memory reference
behavior. Unfortunately, most existing performance
debugging tools do not assist the programmer in this
component of the overall performance tuning task. This
paper describes MemSpy, a prototype tool that helps
programmers identify and fix memory bottlenecks in both
sequential and parallel programs. A key aspect of
MemSpy is that it introduces the notion of data
oriented, in addition to code oriented, performance
tuning. Thus, for both source level code objects and
data objects, MemSpy provides information such as cache
miss rates, causes of cache misses, and in
multiprocessors, information on cache invalidations and
local versus remote memory misses. MemSpy also
introduces a concise matrix presentation to allow
programmers to view both code and data oriented
statistics at the same time. This paper presents design
and implementation issues for MemSpy, and gives a
detailed case study using MemSpy to tune a parallel
sparse matrix application. It shows how MemSpy helps
pinpoint memory system bottlenecks, such as poor
spatial locality and interference among data
structures, and suggests paths for improvement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
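MemSpy's contribution is data-oriented statistics such as per-object miss rates. The toy Python sketch below conveys that idea by running an invented reference trace through a small direct-mapped cache and attributing misses to the data object touched; the cache parameters, trace format, and the row-versus-column example are assumptions made for illustration and say nothing about MemSpy's actual implementation.

    # Toy illustration of data-oriented miss attribution in the spirit of MemSpy.
    from collections import defaultdict

    LINE = 32          # bytes per cache line (invented parameters)
    SETS = 256         # direct-mapped: one line per set

    def per_object_miss_rates(trace):
        """trace: iterable of (object_name, byte_address) references."""
        tags = [None] * SETS
        refs = defaultdict(int)
        misses = defaultdict(int)
        for obj, addr in trace:
            line = addr // LINE
            idx, tag = line % SETS, line // SETS
            refs[obj] += 1
            if tags[idx] != tag:              # miss: fetch the line into the set
                tags[idx] = tag
                misses[obj] += 1
        return {obj: misses[obj] / refs[obj] for obj in refs}

    # A row-major matrix walked along its rows reuses each cache line; walking
    # down its columns does not, and the per-object miss rates make that visible.
    N = 256
    row_walk = [("matrix_by_rows", 8 * (i * N + j)) for i in range(N) for j in range(N)]
    col_walk = [("matrix_by_cols", 8 * (i * N + j)) for j in range(N) for i in range(N)]
    print(per_object_miss_rates(row_walk + col_walk))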
@Article{Whalley:1992:FIC,
author = "David B. Whalley",
title = "Fast instruction cache performance evaluation using
compile-time analysis",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "13--22",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133081",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache simulation; instruction cache; trace analysis;
trace generation",
}
@Article{LaRowe:1992:ADP,
author = "Richard P. {LaRowe, Jr.} and Mark A. Holliday and
Carla Schlatter Ellis",
title = "An analysis of dynamic page placement on a {NUMA}
multiprocessor",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "23--34",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133082",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The class of NUMA (nonuniform memory access time)
shared memory architectures is becoming increasingly
important with the desire for larger scale
multiprocessors. In such machines, the placement and
movement of code and data are crucial to performance.
The operating system can play a role in managing
placement through the policies and mechanisms of the
virtual memory subsystem. In this paper, we develop an
analytic model of memory system performance of a
Local/Remote NUMA architecture based on approximate
mean-value analysis techniques. The model assumes that
a simple workload model based on a few parameters can
often provide insight into the general behavior of real
applications. The model is validated against
experimental data obtained with the DUnX operating
system kernel for the BBN GP1000 while running a
synthetic workload. The results of this validation show
that in general, model predictions are quite good,
though in some cases the model fails to include the
effect of unexpected behaviors in the implementation.
Experiments investigate the effectiveness of dynamic
multiple-copy page placement. We investigate the cost
of incorrect policy decisions by introducing different
percentages of policy error and measuring their effect
on performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicola:1992:AGC,
author = "Victor F. Nicola and Asit Dan and Daniel M. Dias",
title = "Analysis of the generalized clock buffer replacement
scheme for database transaction processing",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "35--46",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133084",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The CLOCK algorithm is a popular buffer replacement
algorithm because of its simplicity and its ability to
approximate the performance of the Least Recently Used
(LRU) replacement policy. The Generalized Clock
(GCLOCK) buffer replacement policy uses a circular
buffer and a weight associated with each page brought
into the buffer to decide which page to replace. We
develop an approximate analysis for the GCLOCK policy
under the Independent Reference Model (IRM) that
applies to many database transaction processing
workloads. We validate the analysis for various
workloads with data access skew. Comparison with
simulations shows that in all cases examined the error
is extremely small (less than 1\%). To show the
usefulness of the model we apply it to a Transaction
Processing Performance Council benchmark A (TPC-A)-like workload.
If knowledge of the different data partitions in this
workload is assumed, the analysis shows that, with
appropriate choice of weights, the performance of the
GCLOCK algorithm can be better than the LRU policy.
Performance very close to that for optimal (static)
buffer allocation can be achieved by assigning
sufficiently high weights, and can be implemented with
a reasonably low overhead. Finally, we outline how the
model can be extended to capture the effect of page
invalidation in a multinode system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
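For readers unfamiliar with the mechanism being analyzed, a minimal Python sketch of a GCLOCK buffer follows: each resident page carries a counter initialized to a weight, a hit refreshes the counter, and on a miss the clock hand decrements counters as it sweeps and evicts the first page whose counter has reached zero. The class name, weight value, and toy reference string are assumptions; the paper's analytical model under the Independent Reference Model is not reproduced here.

    # Minimal sketch of the GCLOCK replacement mechanism (weights are made up).
    class GClock:
        def __init__(self, frames, weight=2):
            self.frames = frames
            self.weight = weight
            self.pages = [None] * frames     # page id held in each frame
            self.count = [0] * frames        # GCLOCK counter per frame
            self.where = {}                  # page id -> frame index
            self.hand = 0

        def access(self, page):
            if page in self.where:           # hit: refresh the counter
                self.count[self.where[page]] = self.weight
                return True
            while True:                      # miss: sweep until a counter hits 0
                if self.pages[self.hand] is None or self.count[self.hand] == 0:
                    victim = self.pages[self.hand]
                    if victim is not None:
                        del self.where[victim]
                    self.pages[self.hand] = page
                    self.count[self.hand] = self.weight
                    self.where[page] = self.hand
                    self.hand = (self.hand + 1) % self.frames
                    return False
                self.count[self.hand] -= 1
                self.hand = (self.hand + 1) % self.frames

    buf = GClock(frames=3, weight=1)
    hits = sum(buf.access(p) for p in [1, 2, 3, 1, 4, 1, 2])
    print(f"{hits} hits out of 7 accesses")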
@Article{Borst:1992:CCC,
author = "S. C. Borst and O. J. Boxma and M. B. Comb{\'e}",
title = "Collection of customers: a correlated {M/G/1} queue",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "47--59",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133085",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jacquet:1992:STD,
author = "Philippe Jacquet",
title = "Subexponential tail distribution in {LaPalice}
queues",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "60--69",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133087",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1992:RBC,
author = "Duan-Shin Lee and Bhaskar Sengupta",
title = "A reservation based cyclic server queue with limited
service",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "70--77",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine a problem which is an
extension of the limited service in a queueing system
with a cyclic server. In this service mechanism, each
queue, after receiving service in cycle $j$, makes a
reservation for its service requirement in cycle $ j +
1$. In this paper, we consider symmetric case only,
i.e., the arrival rates to all the queues are the same.
The main contribution to queueing theory is that we
propose an approximation for the queue length and
sojourn-time distributions for this discipline. Most
approximate studies of cyclic queues considered before
examine only the means. Our method
is an iterative one, which we prove to be convergent by
using stochastic dominance arguments. We examine the
performance of our algorithm by comparing it to
simulations and show that the results are very good.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramakrishnan:1992:AFT,
author = "K. K. Ramakrishnan and Prabuddha Biswas and
Ramakrishna Karedla",
title = "Analysis of file {I/O} traces in commercial computing
environments",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "78--90",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133090",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Improving the performance of the file system is
becoming increasingly important to alleviate the effect
of I/O bottlenecks in computer systems. To design
changes to an existing file system or to architect a
new file system it is important to understand current
usage patterns. In this paper we analyze file I/O
traces of several existing production computer systems
to understand file access behavior. Our analysis
suggests that a relatively small percentage of the
files are active. The total amount of active data is
also quite small for interactive environments. An
average file encounters a relatively small number of
opens while receiving an order of magnitude more reads.
An average process opens quite a
large number of files over a typical prime time period.
What is more significant is that the effect of outliers
on many of the characteristics we studied is dominant.
A relatively small number of processes dominate the
activity, and a very small number of files receive most
of these operations. In addition, we provide a
comprehensive analysis of the dynamic sharing of files
in each of these environments, addressing both the
simultaneous and sequential sharing aspects, and the
activity to these shared files. We observe that
although only a third of the active files are
sequentially shared, they receive a very large
proportion of the total operations. We analyze the
traces from a given environment across different
lengths of time, such as one-hour, three-hour, and whole
work-day intervals, and do this for three different
environments. This gives us an idea of the shortest
length of the trace needed to have confidence in the
estimation of the parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sandhu:1992:CBF,
author = "Harjinder S. Sandhu and Songnian Zhou",
title = "Cluster-based file replication in large-scale
distributed systems",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "91--102",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133092",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increasing need for data sharing in large-scale
distributed systems may place a heavy burden on
critical resources such as file servers and networks.
Our examination of the workload in one large commercial
engineering environment shows that widespread sharing
of unstable files among tens to hundreds of users is
common. Traditional client-based file caching
techniques are not scalable in such environments. We
propose Frolic, a scheme for cluster-based file
replication in large-scale distributed file systems. A
cluster is a group of workstations and one or more file
servers on a local area network. Large distributed
systems may have tens or hundreds of clusters connected
by a backbone network. By dynamically creating and
maintaining replicas of shared files on the file
servers in the clusters using those files, we
effectively reduce reliance on central servers
supporting such files, as well as reduce the distances
between the accessing sites and data. We propose and
study algorithms for the two main issues in Frolic, (1)
locating a valid file replica, and (2) maintaining
consistency among replicas. Our simulation experiments
using a statistical workload model based upon
measurement data and real workload characteristics show
that cluster-based file replication can significantly
reduce file access delays and server and backbone
network utilizations in large-scale distributed systems
over a wide range of workload conditions. The workload
characteristics most critical to replication
performance are: the size of shared files, the number
of clusters that modify a file, and the number of
consecutive accesses to files from a particular
cluster.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merchant:1992:PAD,
author = "Arif Merchant and Kun-Lung Wu and Philip S. Yu and
Ming-Syan Chen",
title = "Performance analysis of dynamic finite versioning for
concurrent transaction and query processing",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "103--114",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133094",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we analyze the performance of dynamic
finite versioning (DFV) schemes for concurrent
transaction and query processing, where a finite number
of consistent snapshots can be derived for query
access. We develop analytical models based on a renewal
process approximation to evaluate the performance of
DFV using $ M \geq 2 $ snapshots. The storage overhead
and obsolescence faced by queries are measured.
Simulation is used to validate the analytical models
and to evaluate the trade-offs between various
strategies for advancing snapshots when $ M > 2 $. The
results show that (1) the analytical models match
closely with simulation; (2) both the storage overhead
and obsolescence are sensitive to the
snapshot-advancing strategies, especially for $ M > 2 $
snapshots; and (3) generally speaking, increasing the
number of snapshots demonstrates a trade-off between
storage overhead and query obsolescence. For cases with
skewed access or low update rates, a moderate increase
in the number of snapshots beyond 2 can substantially
reduce the obsolescence, while the storage overhead may
increase only slightly, or even decrease in some cases.
Moreover, for very low update rates, a large number of
snapshots can be used to reduce the obsolescence to
almost zero without increasing the storage overhead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:1992:PAL,
author = "Alexander Thomasian",
title = "Performance analysis of locking policies with limited
wait depth",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "115--127",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133095",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a transaction processing system
with the standard two-phase locking (2PL) concurrency
control (CC) method (with the general waiting policy
upon a lock conflict) may be degraded significantly due
to transaction blocking in a high lock contention
environment. In the limit this effect leads to the
thrashing phenomenon, i.e., the majority of the
transactions in the system become blocked. Limiting the
wait depth of blocked transactions is an effective
method to increase the number of active transactions in
the system and to prevent thrashing, but this is at the
cost of additional processing due to transaction
restarts. The no-waiting (or immediate restart) policy
limits the wait-depth to zero, while cautious waiting
and the running priority policies use different methods
to limit the wait depth to one. A variant of the wait
depth limited (WDL) policy [8] also limits the wait
depth to one, while attempting to minimize the wasted
processing incurred by transaction aborts. A unified
methodology to analyze the performance of the 2PL CC
method with limited wait depth policies in a system
with multiple transaction classes is described in this
paper. The analysis is based on Markov chains
representing the execution steps of each transaction in
isolation, but as affected by hardware resource and
data contention with other transactions in the system.
Since the transition rates of the Markov chain are not
known a priori, an iterative solution method is
developed, which is then applied to the running
priority and WDL policies. Simulation is used for
validating the accuracy of the approximate analytic
solutions. Of interest are the conservation laws
governing the rate at which locks are transferred among
transactions, which can be used to verify the
correctness of the analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kurose:1992:CPS,
author = "Jim Kurose",
title = "On computing per-session performance bounds in
high-speed multi-hop computer networks",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "128--139",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133097",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a technique for computing upper bounds on
the distribution of individual per-session performance
measures such as delay and buffer occupancy for
networks in which sessions may be routed over several
``hops.'' Our approach is based on first stochastically
bounding the distribution of the number of packets (or
cells) which can be generated by each traffic source
over various lengths of time and then ``pushing'' these
bounds (which are then shown to hold over new time
interval lengths at various network queues) through the
network on a per-session basis. Session performance
bounds can then be computed once the stochastic bounds
on the arrival process have been characterized for each
session at all network nodes. A numerical example is
presented and the resulting distributional bounds
compared with simulation as well as with a point-valued
worst-case performance bound.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lui:1992:AAB,
author = "John C. S. Lui and Richard R. Muntz",
title = "Algorithmic approach to bounding the mean response
time of a minimum expected delay routing system",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "140--151",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133099",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an algorithmic approach to
bounding the mean response time of a multi-server
system in which the minimum expected delay routing
policy is used, i.e., an arriving job will join the
queue which has the minimal expected value of
unfinished work. We assume the queueing system to have
$K$ servers, each with an infinite capacity queue. The
arrival process is Poisson with parameter $ \lambda $,
and the service time of server $i$ is exponentially
distributed with mean $ 1 / \mu_i, 1 \leq i \leq K$.
The computation algorithm we present allows one to
tradeoff accuracy and computational cost. Upper and
lower bounds on the expected response time and expected
number of customers are computed; the spread between
the bounds can be reduced with additional space and
time complexity. Examples are presented which
illustrate the excellent relative accuracy attainable
with relatively little computation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
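The routing rule analyzed above is easy to simulate, which is one way to sanity-check bounds of this kind. The following discrete-event simulation in Python routes each Poisson arrival to the queue with the smallest expected unfinished work (queue length divided by service rate, by memorylessness); the parameter values and job count are arbitrary, and this is only a simulation baseline, not the authors' bounding algorithm.

    # Discrete-event simulation of minimum-expected-delay routing:
    # Poisson arrivals, K exponential servers, each with its own FIFO queue.
    import heapq, random

    def simulate(lam, mus, n_jobs=100_000, seed=1):
        rng = random.Random(seed)
        K = len(mus)
        queue_len = [0] * K                       # jobs at each server, incl. in service
        in_queue = [[] for _ in range(K)]         # FIFO of arrival times per server
        events = [(rng.expovariate(lam), "arr", -1)]
        arrivals_left = n_jobs
        total_resp = done = 0

        while events:
            t, kind, i = heapq.heappop(events)
            if kind == "arr":
                # Expected unfinished work at server k is roughly queue_len[k] / mus[k].
                i = min(range(K), key=lambda k: queue_len[k] / mus[k])
                queue_len[i] += 1
                in_queue[i].append(t)
                if queue_len[i] == 1:             # server was idle: start service now
                    heapq.heappush(events, (t + rng.expovariate(mus[i]), "dep", i))
                arrivals_left -= 1
                if arrivals_left > 0:
                    heapq.heappush(events, (t + rng.expovariate(lam), "arr", -1))
            else:                                 # departure from server i
                total_resp += t - in_queue[i].pop(0)
                done += 1
                queue_len[i] -= 1
                if queue_len[i] > 0:
                    heapq.heappush(events, (t + rng.expovariate(mus[i]), "dep", i))
        return total_resp / done

    print(f"mean response time ~ {simulate(1.5, [1.0, 1.0]):.3f}")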
@Article{deSouzaeSilva:1992:SSE,
author = "Edmundo {de Souza e Silva} and Pedro Meji{\'a} Ochoa",
title = "State space exploration in {Markov} models",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "152--166",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133100",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance and dependability analysis is usually
based on Markov models. One of the main problems faced
by the analyst is the large state space cardinality of
the Markov chain associated with the model, which
precludes not only the model solution, but also the
generation of the transition rate matrix. However, in
many real system models, most of the probability mass
is concentrated in a small number of states in
comparison with the whole state space. Therefore,
performability measures may be accurately evaluated
from these ``high probable'' states. In this paper, we
present an algorithm to generate the most probable
states that is more efficient than previous algorithms
in the literature. We also address the problem of
calculating measures of interest and show how bounds on
some measures can be efficiently calculated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
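To convey the underlying idea of concentrating on a small set of "high probable" states, here is a generic best-first expansion of a discrete-time chain in Python that keeps the states with the largest accumulated path-probability estimates. The transition structure, the budget parameter, and the birth-death example are invented, and this heuristic is not the authors' algorithm, which addresses continuous-time models and bounds on performability measures.

    # Generic best-first exploration of the most probable states of a chain.
    import heapq

    def top_states(P, start, budget):
        """P: state -> list of (next_state, prob). Returns the states with the
        largest accumulated path-probability estimates, up to `budget` of them."""
        mass = {start: 1.0}
        frontier = [(-1.0, start)]
        expanded = set()
        while frontier and len(expanded) < budget:
            _, s = heapq.heappop(frontier)
            if s in expanded:
                continue
            expanded.add(s)
            m = mass[s]                      # current estimate for s when expanded
            for nxt, p in P.get(s, []):
                mass[nxt] = mass.get(nxt, 0.0) + m * p
                heapq.heappush(frontier, (-mass[nxt], nxt))
        return dict(sorted(mass.items(), key=lambda kv: -kv[1])[:budget])

    # Tiny birth-death chain: from state n, go up w.p. 0.3, down (or stay at 0) w.p. 0.7.
    P = {n: [(n + 1, 0.3), (max(n - 1, 0), 0.7)] for n in range(50)}
    print(top_states(P, start=0, budget=10))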
@Article{Owicki:1992:FPA,
author = "Susan S. Owicki and Anna R. Karlin",
title = "Factors in the performance of the {AN1} computer
network",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "167--180",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133102",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "AN1 (formerly known as Autonet) is a local area
network composed of crossbar switches interconnected by
100 Mbit/second, full-duplex links. In this paper, we
evaluate the performance impact of certain choices in
the AN1 design. These include the use of FIFO input
buffering in the crossbar switch, the
deadlock-avoidance mechanism, cut-through routing,
back-pressure for flow control, and multi-path routing.
AN1's performance goals were to provide low latency and
high bandwidth in a lightly loaded network. In this it
is successful. Under heavy load, the most serious
impediment to good performance is the use of FIFO input
buffers. The deadlock-avoidance technique has an
adverse effect on the performance of some topologies,
but it seems to be the best alternative, given the
goals and constraints of the AN1 design. Cut-through
switching performs well relative to store-and-forward
switching, even under heavy load. Back-pressure deals
adequately with congestion in a lightly-loaded network;
under moderate load, performance is acceptable when
coupled with end-to-end flow control for bursts.
Multi-path routing successfully exploits redundant
paths between hosts to improve performance in the face
of congestion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shankar:1992:PCR,
author = "A. Udaya Shankar and Cengiz Alaettino{\u{g}}lu and
Ibrahim Matta and Klaudia Dussa-Zieger",
title = "Performance comparison of routing protocols using
{MaRS}: distance-vector versus link-state",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "181--192",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133103",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are two approaches to adaptive routing protocols
for wide-area store-and-forward networks:
distance-vector and link-state. Distance-vector
algorithms use $ O(N \times e) $ storage at each node,
whereas link-state algorithms use $ O(N^2) $, where $N$
is the number of nodes in the network and $e$ is the
average degree of a node. The ARPANET started with a
distance-vector algorithm (Distributed Bellman-Ford),
but because of long-lived loops, changed to a
link-state algorithm (SPF). We show, using a recently
developed network simulator, MaRS, that a newly
proposed distance-vector algorithm (ExBF) performs as
well as SPF. This suggests that distance-vector
algorithms are appropriate for very large wide-area
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Altman:1992:CLC,
author = "Eitan Altman and Philippe Nain",
title = "Closed-loop control with delayed information",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "193--204",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133106",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The theory of Markov Control Model with Perfect State
Information (MCM-PSI) requires that the current state
of the system is known to the decision maker at
decision instants. Otherwise, one speaks of Markov
Control Model with Imperfect State Information
(MCM-ISI). In this article, we introduce a new class of
MCM-ISI, where the information on the state of the
system is delayed. Such an information structure is
encountered, for instance, in high-speed data networks.
In the first part of this article, we show that by
enlarging the state space so as to include the last
known state as well as all the decisions made during
the travel time of the information, we may reduce a
MCM-ISI to a MCM-PSI. In the second part of this paper,
this result is applied to a flow control problem.
We consider a discrete-time queueing model with
Bernoulli arrivals and geometric services, where the
intensity of the arrival stream is controlled. At the
beginning of slot $ t + 1 $, $ t = 0, 1, 2, \ldots{} $,
the decision maker has to select the probability of
having one arrival in the current time slot from the
set $ \{ p_1, p_2 \} $, $ 0 \leq p_2 < p_1 \leq 1 $,
only on the basis of the queue-length and action
histories in $ [0, t] $. The aim is
to optimize a discounted throughput/delay criterion. We
show that there exists an optimal policy of a threshold
type, where the threshold is seen to depend on the last
action.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merchant:1992:AMC,
author = "Arif Merchant",
title = "Analytical models of combining {Banyan} networks",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "205--212",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133107",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present in this paper an analytical model of a
multistage combining Banyan network with output
buffered switches, in hot-spot traffic. In a combining
network, packets bound for the same destination are
combined into one if they meet at a switch; this
alleviates the problem of tree-saturation caused by
hot-spot traffic. We model the flow processes in the
network as Markov chains and recursively approximate
the departure processes of each stage of the network in
terms of the departure processes of the preceding
stage. This model is used to predict the throughput of
the combining network, and comparison with simulation
results shows the prediction to be accurate. A modified
combining scheme based on low priorities for hot
packets is proposed and analyzed. It is shown that this
scheme yields substantial improvements in throughput
over the standard combining scheme.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akyildiz:1992:PAT,
author = "Ian F. Akyildiz and Liang Chen and Samir R. Das and
Richard M. Fujimoto and Richard F. Serfozo",
title = "Performance analysis of ``{Time Warp}'' with limited
memory",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "213--224",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133109",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The behavior of $n$ interacting processes synchronized
by the ``Time Warp'' rollback mechanism is analyzed
under the constraint that the total amount of memory to
execute the program is limited. In Time Warp, a
protocol called ``cancelback'' has been proposed to
reclaim storage when the system runs out of memory. A
discrete state, continuous time Markov chain model for
Time Warp augmented with the cancelback protocol is
developed for a shared memory system with $n$
homogeneous processors and homogeneous workload. The
model allows one to predict speedup as the amount of
available memory is varied. To our knowledge, this is
the first model to achieve this result. The performance
predicted by the model is validated through direct
performance measurements on an operational Time Warp
system executing on a shared-memory multiprocessor
using a workload similar to that in the model. It is
observed that Time Warp with only a few additional
message buffers per processor over that required in the
corresponding sequential execution can achieve
approximately the same or even greater performance than
Time Warp with unlimited memory, if GVT computation and
fossil collection can be efficiently implemented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Turek:1992:SPT,
author = "John Turek and Joel L. Wolf and Krishna R. Pattipati
and Philip S. Yu",
title = "Scheduling parallelizable tasks: putting it all on the
shelf",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "225--236",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133111",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we formulate the following natural
multiprocessor scheduling problem: Consider a parallel
system with $P$ processors. Suppose that there are $N$
tasks to be scheduled on this system, and that the
execution time of each task $ j \in \{ 1, \ldots{}, N
\} $ is a nonincreasing function $ t_j(\beta_j)$
of the number of processors $ \beta_j \in \{ 1,
\ldots{}, P \} $ allotted to it. The goal is to find,
for each task $j$, an allotment of processors $
\beta_j$, and, overall, a schedule assigning the tasks
to the processors which minimizes the makespan, or
latest task completion time. The so-called shelf
strategy is commonly used for orthogonal rectangle
packing, a related and classic optimization problem.
The prime difference between the orthogonal rectangle
problem and our own is that in our case the rectangles
are, in some sense, malleable: The height of each
rectangle is a nonincreasing function of its width. In
this paper, we solve our multiprocessor scheduling
problem exactly in the context of a shelf-based
paradigm. The algorithm we give uses techniques from
resource allocation theory and employs a variety of
other combinatorial optimization techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bremaud:1992:SLR,
author = "P. Br{\'e}maud and W.-B. Gong",
title = "Stationary likelihood ratios and smoothed perturbation
analysis gradient estimates for the routing problem",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "237--238",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.114676",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present stationary and regenerative form estimates
for the gradients of the cycle variables with respect
to a thinning parameter in the arrival process of G/G/1
queueing systems. Our estimates belong to the category
of the likelihood ratio method (LRM) and smoothed
perturbation analysis (SPA) estimates. The results are
useful in adaptive routing design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Candlin:1992:SPP,
author = "Rosemary Candlin and Peter Fisk and Joe Phillips and
Neil Skilling",
title = "Studying the performance properties of concurrent
programs by simulation experiments on synthetic
programs",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "239--240",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133138",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We have developed a methodology for constructing
performance models of different types of concurrent
programs, and hence obtaining estimates of execution
times on different multiprocessor machines. A given
class of program is characterized in terms of a small
set of parameters which summarise the behaviour of the
program over time. Synthetic programs with selected
sets of parameters can then be generated and their
execution simulated on a model of some given parallel
machine. By varying the parameters systematically, we
can discover which factors most affect performance. Our
approach has been to conduct factorial experiments from
which we can obtain quantitative predictions of
performance for arbitrary concurrent programs whose
parameter values lie within the extreme factor levels,
and whose synchronization behaviour conforms to one of
a number of common patterns.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berry:1992:CIP,
author = "Robert F. Berry and Joseph L. Hellerstein",
title = "Characterizing and interpreting periodic behavior in
computer systems",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "241--242",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133139",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rahm:1992:HPC,
author = "Erhard Rahm and Donald Ferguson",
title = "High performance cache management for sequential data
access",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "243--244",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133141",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chakka:1992:MSG,
author = "Ram Chakka and Isi Mitrani",
title = "Multiprocessor systems with general breakdowns and
repairs (extended abstract)",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "245--246",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133143",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brewer:1992:PHP,
author = "Eric A. Brewer and Chrysanthos N. Dellarocas and
Adrian Colbrook and William E. Weihl",
title = "{PROTEUS}: a high-performance parallel-architecture
simulator",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "247--248",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meliksetian:1992:PAC,
author = "Dikran S. Meliksetian and C. Y. Roger Chen",
title = "Performance analysis of communications in static
interconnection networks",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "249--250",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a model, based on a network of DX/D/1
queues, to predict the communication performance of
static interconnection networks under various
communication patterns. Our model predicts delay time
distributions in the links as well as the first and
second moments of the overall delay time of messages in
the system. These predictions are verified by the
results of simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dan:1992:CDA,
author = "Asit Dan and Philip S. Yu and Jen-Yao Chung",
title = "Characterization of database access skew in a
transaction processing environment",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "251--252",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133150",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The knowledge of access skew (non-uniform access) in
each database relation is useful for both workload
management (buffer pool allocation, transaction
routing, etc.), as well as capacity planning for
changing workload mix. However, it is a challenging
problem to characterize the access skew of a real
database workload in a simple manner that can easily be
used to compute the buffer hit probability under the
LRU replacement policy. A concise way to characterize
the access skew is proposed by assuming that the large
number of data pages may be logically grouped into a
small number of partitions such that the frequency of
accessing each page within a partition can be treated
as equal. Based on this approach, a recursive binary
partitioning algorithm is presented that can infer the
access skew from the buffer hit probabilities for a
subset of the buffer sizes. This avoids explicit
estimation of individual access frequencies for the
large number of database pages. The method is validated
on its ability to predict the buffer hit probability
from the skew characterization using production
database traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:1992:XPE,
author = "Aloke Gupta and Wen-Mei W. Hwu",
title = "{Xprof}: profiling the execution of {X Window}
programs",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "253--254",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133152",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shoham:1992:ETP,
author = "Ruth Shoham and Uri Yechiali",
title = "Elevator-type polling systems (abstract)",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "255--257",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133154",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baccelli:1992:PSS,
author = "Fran{\c{c}}ois Baccelli and Miguel Canales",
title = "Parallel simulation of stochastic {Petri} nets using
recurrence equations",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "257--258",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133156",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Petri nets provide a powerful modeling formalism,
which allows one to describe and study various classes
of systems, such as synchronous and asynchronous
processes, and/or parallel or sequential ones. We
present below a software package, currently under
development, that allows the user to specify a
stochastic marked graph [1] using either a graphical
interface or a specification language. From this
specification a simulation program for a Single
Instruction Multiple Data (SIMD) parallel machine is
generated. A Connection Machine 2 (CM2) is used as the
architecture for running this program.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jobmann:1992:PAP,
author = "Manfred R. Jobmann and Johann Schumann",
title = "Performance analysis of a parallel theorem prover",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "259--260",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133158",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shanley:1992:TRN,
author = "Kim Shanley and Amie Belongia",
title = "{TPC} releases new benchmark: {TPC-C}",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "8--22",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.141861",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pooley:1992:BRP,
author = "Rob Pooley",
title = "Book review: {`Performance Engineering of Software
Systems' by Connie U. Smith (Addison Wesley 1990)}",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "23--24",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.1044851",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To those working in the field of performance, Connie
Smith should need no introduction. She is the author of
many papers which have sought to make accessible the
techniques of performance analysis and prediction to
practising software designers. She is probably the
first to have used the term `performance engineering'
to describe the application of such techniques to
software systems. The publication of a book which
encapsulates her ideas is therefore of considerable
interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Taylor:1992:BRQ,
author = "Stephen Taylor",
title = "Book review: {``Queuing Networks --- Exact
Computational Algorithms: A Unified Theory Based on
Decomposition and Aggregation'' by Adrian E. Conway and
Nicolas D. Georganas (MIT Press 1989)}",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "24--26",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.1044852",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queuing Network models are an approach to modeling
real-world problems based on the abstractions of
servers, queues, and routing between them. Product-form
queuing networks have a particularly simple formula
describing the state distribution, and have accrued a
literature describing them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kobayashi:1992:CMM,
author = "Makoto Kobayashi",
title = "A cache multitasking model",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "27--37",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.141863",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A hierarchical program behavior model in a
multitasking environment was proposed and applied to a
cache multitasking model for performance evaluation.
The hierarchical program behavior model consists of the
task switching model, execution interval model, and the
line (block) reference behavior model for each
individual task. An execution interval is a continuous
execution of a task between task switches. As a task
executes in an execution interval, it brings its lines
into a cache according to the line reference behavior
model. The Stack Growth Function (SGF) model was used
for this purpose. The state of a cache is defined by
the numbers of lines of the individual tasks. The state
of a cache at task switches then constitutes an
embedded Markov chain. Although a set of simultaneous
linear equations in steady state cannot be solved
exactly in practice because of the excessively large
state space, it can be solved very efficiently by a
Monte-Carlo simulation. The model was validated against
the miss rate measured by a hardware monitor in a
controlled environment on a mainframe running IBM MVS
operating system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Porotskiy:1992:DTM,
author = "S. M. Porotskiy and A. E. Fateev",
title = "Development trends in methods for efficiency
evaluation of {ES}-based computer systems",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "38--42",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.141864",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper concerns some methods for efficiency
evaluation of IBM-compatible universal ES computers as
they have been improved during their short lifetime. The
multi-level structure of computer efficiency is
described, and the factors influencing its
quantification are pointed out. The measured results
are given on the capacity of individual computers with
different loads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Porotskiy:1992:SRP,
author = "S. M. Porotskiy and A. E. Fateev",
title = "System and real performance evaluation of computer",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "43--46",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.141865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to review some methods
for efficiency evaluation of universal computer
systems. This paper is a continuation of [1] and concerns
the measurements and analytical modeling for
performance evaluation on system and real levels.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vandeLiefvoort:1993:BRM,
author = "Apple van de Liefvoort",
title = "Book review: {``Multiple Access Protocols: Performance
and Analysis'' by Raphael Rom and Moshe Sidi
(Springer-Verlag, 1990)}",
journal = j-SIGMETRICS,
volume = "20",
number = "3",
pages = "5--6",
month = mar,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155768.1044950",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multiple Access Protocols, the focus of this book, are
the rules and procedures which dictate the behavior of
switches and channels in computer networks; they are
the channel allocation schemes found in the Medium
Access Control layer of the OSI reference model.
Most of us have heard of FDMA, TDMA, CDMA, Ethernet,
CSMA, CSMA/CD, Aloha, token passing, packet switching,
or their many, many variations. According to their
preface, the authors aim this book at the student and
professional engineer who is (or will be) responsible
for the design and/or operation of such networks.
Rather than giving a vast compendium of protocols and
their analysis, they hope to give an understanding of
the behavior and operation of multiple access systems
through their performance analysis. They try to cover
all types of protocols for random access networks and
most of the analytical methods used in their
performance analysis with a uniform notation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{TPC:1993:STRa,
author = "{Corporate TPC}",
title = "Summary of {TPC} results (as of {December 22, 1992})",
journal = j-SIGMETRICS,
volume = "20",
number = "3",
pages = "7--21",
month = mar,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155768.155769",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maffeis:1993:FAP,
author = "Silvano Maffeis",
title = "File access patterns in public {FTP} archives and an
index for locality of reference",
journal = j-SIGMETRICS,
volume = "20",
number = "3",
pages = "22--35",
month = mar,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155768.155771",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Global filesystems and new file transfer protocols are
a great need and challenge in the presence of
drastically growing networks. In this paper we present
results obtained from an investigation of access to
public files which took place over three months. This
work presents first results on the popularity of
public ftp files, on common operations (deletions,
updates and insertions) on public file-archives, and on
encountered filesizes. An index for measuring locality
of reference to a resource is also proposed. The
results show that most file transfers relate to only a
small fraction of the files in an archive and that a
considerable part of the operations to public files are
updates of files. Further results are presented and
interpreted in the paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file transfer; filesizes; locality of reference;
popularity of files; replication",
}
@Article{Ulusoy:1993:AAR,
author = "{\"O}zg{\"u}r Ulusoy",
title = "An approximate analysis of a real-time database
concurrency control protocol via {Markov} modeling",
journal = j-SIGMETRICS,
volume = "20",
number = "3",
pages = "36--48",
month = mar,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155768.155773",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Transactions processed in a real-time database system
(RTDBS) are associated with real-time constraints
typically in the form of deadlines. Computer-integrated
manufacturing, the stock market, banking, and command
and control systems are several examples of RTDBS
applications where the timeliness of transaction
response is as important as the consistency of data.
Design of a RTDBS requires the integration of concepts
from both real-time systems and database systems to
handle the timing and consistency requirements
together; i.e., to execute transactions so as to both
meet the deadlines and maintain the database
consistency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{IBM:1993:SP,
author = "{Corporate IBM Systems Analysis Department}",
title = "Selected publications: 1992",
journal = j-SIGMETRICS,
volume = "20",
number = "4",
pages = "3--9",
month = may,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155775.155777",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{TPC:1993:STRb,
author = "{Corporate TPC}",
title = "Summary of {TPC} results (as of {March} 15, 1993)",
journal = j-SIGMETRICS,
volume = "20",
number = "4",
pages = "10--23",
month = may,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155775.155779",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raatikainen:1993:CAW,
author = "Kimmo E. E. Raatikainen",
title = "Cluster analysis and workload classification",
journal = j-SIGMETRICS,
volume = "20",
number = "4",
pages = "24--30",
month = may,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155775.155781",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Clustering techniques are widely recommended tools for
workload classification. The k-means algorithm is
widely accepted as the `standard' technique of
detecting workload classes automatically from
measurement data. This paper examines the validity of
the obtained workload classes when the current system
and workload are analyzed by a queueing network model and
mean value analysis. Our results, based on one week's
accounting data of a VAX 8600, indicate that the
results of queueing network analysis are not stable
when the classes of workload are constructed through
the {\em k-means\/} algorithm. Therefore, we cannot
recommend that the most widely used clustering
technique be used in any workload
characterization study without careful validation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smith:1993:EPP,
author = "Robert B. Smith and James K. Archibald and Brent E.
Nelson",
title = "Evaluating performance of prefetching second level
caches",
journal = j-SIGMETRICS,
volume = "20",
number = "4",
pages = "31--42",
month = may,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155775.155782",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the increasing disparity between processor and
main memory system cycle times, many computer systems
are now incorporating two levels of cache memory.
Several studies have been done on the design and
performance of second level caches, including [3] and
[20]. It certainly can and has been shown that the
addition of a second level of cache enhances the
performance of many systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1993:NAP,
author = "Peter M. Chen and David A. Patterson",
title = "A new approach to {I/O} performance evaluation:
self-scaling {I/O} benchmarks, predicted {I/O}
performance",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "1--12",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166966",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current I/O benchmarks suffer from several chronic
problems: they quickly become obsolete, they do not
stress the I/O system, and they do not help in
understanding I/O system performance. We propose a new
approach to I/O performance analysis. First, we propose
a self-scaling benchmark that dynamically adjusts
aspects of its workload according to the performance
characteristic of the system being measured. By doing
so, the benchmark automatically scales across current
and future systems. The evaluation aids in
understanding system performance by reporting how
performance varies according to each of five workload
parameters. Second, we propose predicted performance, a
technique for using the results from the self-scaling
evaluation to quickly estimate the performance for
workloads that have not been measured. We show that
this technique yields reasonably accurate performance
estimates and argue that this method gives a far more
accurate comparative performance evaluation than
traditional single point benchmarks. We apply our new
evaluation technique by measuring a SPARCstation 1+
with one SCSI disk, an HP 730 with one SCSI-II disk, a
Sprite LFS DECstation 5000/200 with a three-disk disk
array, a Convex C240 minisupercomputer with a four-disk
disk array, and a Solbourne 5E/905 fileserver with a
two-disk disk array.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Biswas:1993:TDA,
author = "Prabuddha Biswas and K. K. Ramakrishnan and Don
Towsley",
title = "Trace driven analysis of write caching policies for
disks",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "13--23",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166971",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The I/O subsystem in a computer system is becoming the
bottleneck as a result of recent dramatic improvements
in processor speeds. Disk caches have been effective in
closing this gap but the benefit is restricted to the
read operations as the write I/Os are usually committed
to disk to maintain consistency and to allow for crash
recovery. As a result, write I/O traffic is becoming
dominant and solutions to alleviate this problem are
becoming increasingly important. A simple solution
which can easily work with existing file systems is to
use non-volatile disk caches together with a
write-behind strategy. In this study, we look at the
issues around managing such a cache using a detailed
trace driven simulation. Traces from three different
commercial sites are used in the analysis of various
policies for managing the write cache. We observe that
even a simple write-behind policy for the write cache
is effective in reducing the total number of writes by
over 50\%. We further observe that the use of
hysteresis in the policy to purge the write cache, with
two thresholds, yields substantial improvement over a
single threshold scheme. The inclusion of a mechanism
to piggyback blocks from the write cache with read miss
I/Os further reduces the number of writes to only about
15\% of the original total number of write operations.
We compare two piggybacking options and also study the
impact of varying the write cache size. We briefly
looked at the case of a single non-volatile disk cache
to estimate the performance impact of statically
partitioning the cache for reads and writes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sugumar:1993:ESC,
author = "Rabin A. Sugumar and Santosh G. Abraham",
title = "Efficient simulation of caches under optimal
replacement with applications to miss
characterization",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "24--35",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166974",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cache miss characterization models such as the three
Cs model are useful in developing schemes to reduce
cache misses and their penalty. In this paper we
propose the OPT model that uses cache simulation under
optimal (OPT) replacement to obtain a finer and more
accurate characterization of misses than the three Cs
model. However, current methods for optimal cache
simulation are slow and difficult to use. We present
three new techniques for optimal cache simulation.
First, we propose a limited lookahead strategy with
error fixing, which allows one pass simulation of
multiple optimal caches. Second, we propose a scheme to
group entries in the OPT stack, which allows efficient
tree based fully-associative cache simulation under
OPT. Third, we propose a scheme for exploiting partial
inclusion in set-associative cache simulation under
OPT. Simulators based on these algorithms were used to
obtain cache miss characterizations using the OPT model
for nine SPEC benchmarks. The results indicate that
miss ratios under OPT are substantially lower than
those under LRU replacement, by up to 70\% in
fully-associative caches, and up to 32\% in two-way
set-associative caches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chame:1993:CIP,
author = "Jacqueline Chame and Michel Dubois",
title = "Cache inclusion and processor sampling in
multiprocessor simulations",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "36--47",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166977",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The evaluation of cache-based systems demands careful
simulations of entire benchmarks. Simulation efficiency
is essential to realistic evaluations. For systems with
large caches and a large number of processors, simulation
is often too slow to be practical. In particular, the
optimized design of a cache for a multiprocessor is
very complex with current techniques. This paper
addresses these problems. First we introduce necessary
and sufficient conditions for cache inclusion in
systems with invalidations. Second, under cache
inclusion, we show that an accurate trace for a given
processor or for a cluster of processors can be
extracted from a multiprocessor trace. With this
methodology, possible cache architectures for a
processor or for a cluster of processors are evaluated
independently of the rest of the system, resulting in a
drastic reduction of the trace length and simulation
complexity. Moreover, many important system-wide
metrics can be estimated with good accuracy by
extracting the traces of a set of randomly selected
processors, an approach we call processor sampling. We
demonstrate the accuracy and efficiency of these
techniques by applying them to three 64-processor
traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reinhardt:1993:WWT,
author = "Steven K. Reinhardt and Mark D. Hill and James R.
Larus and Alvin R. Lebeck and James C. Lewis and David
A. Wood",
title = "The {Wisconsin Wind Tunnel}: virtual prototyping of
parallel computers",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "48--60",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166979",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We have developed a new technique for evaluating cache
coherent, shared-memory computers. The Wisconsin Wind
Tunnel (WWT) runs a parallel shared-memory program on a
parallel computer (CM-5) and uses execution-driven,
distributed, discrete-event simulation to accurately
calculate program execution time. WWT is a virtual
prototype that exploits similarities between the system
under design (the target) and an existing evaluation
platform (the host). The host directly executes all
target program instructions and memory references that
hit in the target cache. WWT's shared memory uses the
CM-5 memory's error-correcting code (ECC) as valid bits
for a fine-grained extension of shared virtual memory.
Only memory references that miss in the target cache
trap to WWT, which simulates a cache-coherence
protocol. WWT correctly interleaves target machine
events and calculates target program execution time.
WWT runs on parallel computers with greater speed and
memory capacity than uniprocessors. WWT's simulation
time decreases as target system size increases for
fixed-size problems and holds roughly constant as the
target system and problem scale.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Adve:1993:IRD,
author = "Vikram S. Adve and Mary K. Vernon",
title = "The influence of random delays on parallel execution
times",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "61--73",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166982",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic models are widely used for the performance
evaluation of parallel programs and systems. The
stochastic assumptions in such models exe intended to
represent non-deterministic processing requirements as
well as random delays due to inter-process
communication end resource contention. In this paper,
we provide compelling analytical and experimental
evidence that in current and foreseeable shared-memory
programs, communication delays introduce negligible
variance into the execution time between
synchronization points. Furthermore, we show using
direct measurements of variance that other sources of
randomness, particularly non-deterministic
computational requirements, also do not introduce
significant variance in many programs. We then use two
examples to demonstrate the implications of these
results for parallel program performance prediction
models, as well as for general stochastic models of
parallel systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rosti:1993:KEM,
author = "E. Rosti and E. Smirni and T. D. Wagner and A. W. Apon
and L. W. Dowdy",
title = "The {KSR1}: experimentation and modeling of
poststore",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "74--85",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166985",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Kendall Square Research introduced the KSR1 system in
1991. The architecture is based on a ring of rings of
64-bit microprocessors. It is a distributed, shared
memory system and is scalable. The memory structure is
unique and is the key to understanding the system.
Different levels of caching eliminate physical memory
addressing and lead to the ALLCACHE\TM{} scheme. Since
requested data may be found in any of several caches,
the initial access time is variable. Once pulled into
the local (sub) cache, subsequent access times are
fixed and minimal. Thus, the KSR1 is a Cache-Only
Memory Architecture (COMA) system. This paper describes
experimentation and an analytic model of the KSR1. The
focus is on the poststore programmer option. With the
poststore option, the programmer can elect to broadcast
the updated value of a variable to all processors that
might have a copy. This may save time for threads on
other processors, but delays the broadcasting thread
and places additional traffic on the ring. The specific
issue addressed is to determine under what conditions
poststore is beneficial. The analytic model and the
experimental observations are in good agreement. They
indicate that the decision to use poststore depends
both on the application and the current system load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ganger:1993:PFM,
author = "Gregory R. Ganger and Yale N. Patt",
title = "The process-flow model: examining {I/O} performance
from the system's point of view",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "86--97",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166989",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Input/output subsystem performance is currently
receiving considerable research attention. Significant
effort has been focused on reducing average I/O
response times and increasing throughput for a given
workload. This work has resulted in tremendous advances
in I/O subsystem performance. It is unclear, however,
how these improvements will be reflected in overall
system performance. The central problem lies in the
fact that the current method of study tends to treat
all I/O requests as equally important. We introduce a
three class taxonomy of I/O requests based on their
effects on system performance. We denote the three
classes {\em time-critical, time-limited, and
time-noncritical}. A system-level, trace-driven
simulation model has been developed for the purpose of
studying disk scheduling algorithms. By incorporating
knowledge of I/O classes, algorithms tuned for system
performance rather than I/O subsystem performance may
be developed. Traditional I/O subsystem simulators
would rate such algorithms unfavorably because they
produce suboptimal subsystem performance. By studying
the I/O subsystem via global, system-level simulation,
one can more easily identify changes that will improve
overall system performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1993:APM,
author = "Edward K. Lee and Randy H. Katz",
title = "An analytic performance model of disk arrays",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "98--109",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166994",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As disk arrays become widely used, tools for
understanding and analyzing their performance become
increasingly important. In particular, performance
models can be invaluable in both configuring and
designing disk arrays. Accurate analytic performance
models are preferable to other types of models because
they can be quickly evaluated, are applicable under a
wide range of system and workload parameters, and can
be manipulated by a range of mathematical techniques.
Unfortunately, analytic performance models of disk
arrays are difficult to formulate due to the presence
of {\em queueing\/} and {\em fork-join
synchronization\/}; a disk array request is broken up
into independent disk requests which must all complete
to satisfy the original request. In this paper, we
develop and validate an analytic performance model for
disk arrays. We derive simple equations for
approximating their utilization, response time and
throughput. We validate the analytic model via
simulation, investigate the error introduced by each
approximation used in deriving the analytic model, and
examine the validity of some of the conclusions that
can be drawn from the model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tang:1993:MMB,
author = "Dong Tang and Ravishankar K. Iyer",
title = "{MEASURE+}: a measurement-based dependability analysis
package",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "110--121",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166996",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most existing dependability modeling and evaluation
tools are designed for building and solving commonly
used models with emphasis on solution techniques, not
for identifying realistic models from measurements. In
this paper, a measurement-based dependability analysis
package, MEASURE+, is introduced. Given measured data
from real systems in a specified format, MEASURE+ can
generate appropriate dependability models and measures
including Markov and semi-Markov models, $k$-out-of-$n$
availability models, failure distribution and hazard
functions, and correlation parameters. These models and
measures obtained from data are valuable for
understanding actual error/failure characteristics,
identifying system bottlenecks, evaluating
dependability for real systems, and verifying
assumptions made in analytical models. The paper
illustrates MEASURE+ by applying it to the data from a
VAXcluster multicomputer system. Models of field
failure behavior identified by MEASURE+ indicate that
both traditional models assuming failure independence
and those few taking correlation into account are not
representative of the actual occurrence process of
correlated failures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramesh:1993:STS,
author = "A. V. Ramesh and Kishor Trivedi",
title = "On the sensitivity of transient solutions of {Markov}
models",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "122--134",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166998",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the sensitivity of transient solutions of
Markov models to perturbations in their generator
matrices. The perturbations can either be of a certain
structure or can be very general. We consider two
different measures of sensitivity and derive upper
bounds on them. The derived bounds are sharper than
previously reported bounds in the literature. Since the
sensitivity analysis of transient solutions is
intimately related to the condition of the exponential
of the CTMC matrix, we derive an expression for the
condition number of the CTMC matrix exponential which
leads to some interesting implications. We compare the
derived sensitivity bounds both numerically and
analytically with those reported in the literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicol:1993:PSM,
author = "David M. Nicol and Philip Heidelberger",
title = "Parallel simulation of {Markovian} queueing networks
using adaptive uniformization",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "135--145",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167000",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a method for simulating a large
class of queueing network models with Markovian
phase-type distributions on parallel architectures. The
method, which is based on uniformization, exploits
Markovian properties that permit one to first build
schedules of simulation times at which processors ought
to synchronize, and then simulate a mathematically
correct sample path through the pre-chosen schedule.
While the technique eliminates many of the overheads
incurred by other synchronization methods, it may
suffer when the maximum rate (in simulation time) at
which one processor might possibly ever send jobs to
another is much larger than the average rate at which
it actually does. We show how to reduce these
overheads, sometimes doubling the execution rate as a
result. We discuss experiments performed on the Intel
iPSC/2 and Touchstone Delta architectures, where
speedups in excess of 155 are observed on 256
processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goldschmidt:1993:ATD,
author = "Stephen R. Goldschmidt and John L. Hennessy",
title = "The accuracy of trace-driven simulations of
multiprocessors",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "146--157",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167001",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In trace-driven simulation, traces generated for one
set of system characteristics are used to simulate a
system with different characteristics. However, the
execution path of a multiprocessor workload may depend
on the order of events occurring on different
processing elements. The event order, in turn, depends
on system characteristics such as memory-system
latencies and buffer-sizes. Trace-driven simulations of
multiprocessor workloads are inaccurate unless the
dependencies are eliminated from the traces. We have
measured the effects of these inaccuracies by comparing
trace-driven simulations to direct simulations of the
same workloads. The simulators predicted identical
performance only for workloads whose traces were
timing-independent. Workloads that used first-come
first-served scheduling and/or non-deterministic
algorithms produced timing-dependent traces, and
simulation of these traces produced inaccurate
performance predictions. Two types of performance
metrics were particularly affected: those related to
synchronization latency and those derived from
relatively small numbers of events. To accurately
predict such performance metrics, timing-independent
traces or direct simulation should be used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Setia:1993:PSM,
author = "Sanjeev K. Setia and Mark S. Squillante and Satish K.
Tripathi",
title = "Processor scheduling on multiprogrammed, distributed
memory parallel computers",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "158--170",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167002",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multicomputers, consisting of many processing nodes
connected through a high speed interconnection network,
have become an important and common platform for a
large body of scientific computations. These parallel
systems have traditionally executed programs in batch
mode, or have at most space-shared the processors among
multiple programs using a static partitioning policy.
This, however, can result in relatively low system
utilization and throughput for important classes of
scientific applications. In this paper we consider a
class of scheduling policies that attempt to increase
processor utilization and system throughput by
timesharing a partition of processors among multiple
programs. We compare the system performance under this
multiprogramming policy with that of static
partitioning for a variety of workloads via both
analytic and simulation modeling. Our results show that
timesharing a partition can provide significant
improvements in performance, particularly at moderate
to heavy loads. The performance gains of the
multiprogrammed policy depend upon the inherent
efficiency of the parallel programs that comprise the
workload, decreasing with increasing program
efficiency. Our analysis also provides the regions over
which one scheduling policy outperforms the other, as a
function of system load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:1993:PCT,
author = "Kun-Lung Wu and Philip S. Yu and James Z. Teng",
title = "Performance comparison of thrashing control policies
for concurrent {Mergesorts} with parallel prefetching",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "171--182",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167003",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the performance of various run-time thrashing
control policies for the merge phase of concurrent
mergesorts using parallel prefetching, where initial
sorted runs are stored on multiple disks and the final
sorted run is written back to another dedicated disk.
Parallel prefetching via multiple disks can be
attractive in reducing the response times for
concurrent mergesorts. However, severe {\em
thrashing\/} may develop due to imbalances between
input and output rates, so that a large number of
prefetched pages in the buffer can be replaced before
they are referenced. We evaluate through detailed simulations
three run-time thrashing control policies: (a)
disabling prefetching, (b) forcing synchronous writes
and (c) lowering the prefetch quantity in addition to
forcing synchronous writes. The results show that (1)
thrashing resulting from parallel prefetching can
severely degrade the system response time; (2) though
effective in reducing the degree of thrashing,
disabling prefetching may worsen the response time
since more synchronous reads are needed; (3) forcing
synchronous writes can both reduce thrashing and
improve the response time; (4) lowering the prefetch
quantity in addition to forcing synchronous writes is
most effective in reducing thrashing and improving the
response time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meliksetian:1993:MMB,
author = "Dikran S. Meliksetian and C. Y. Roger Chen",
title = "A {Markov}-modulated {Bernoulli} process approximation
for the analysis of {Banyan} networks",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "183--194",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167005",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Markov-Modulated Bernoulli Process (MMBP) model is
used to analyze the delay experienced by messages in
clocked, packet-switched Banyan networks with $ k
\times k $ output-buffered switches. This approach
allows us to analyze both single packet messages and
multipacket messages with general traffic pattern
including uniform traffic, hot-spot traffic, locality
of reference, etc. The ability to analyze multipacket
messages is very important for multimedia applications.
Previous work, which is only applicable to restricted
message and traffic patterns, resorts to either
heuristic correction factors to artificially tune the
model or tedious computational efforts. In contrast,
the proposed model, which is applicable to much more
general message and traffic patterns, not only is an
application of a theoretically complete model but also
requires a minimal amount of computational effort. In
all cases, the analytical results are compared with
results obtained by simulation and are shown to be very
accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arakawa:1993:MVR,
author = "Hiroshi Arakawa and Daniel I. Katcher and Jay K.
Strosnider and Hideyuki Tokuda",
title = "Modeling and validation of the real-time {Mach}
scheduler",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "195--206",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167008",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Real-time scheduling theory is designed to provide
{\em a priori\/} verification that all real-time tasks
meet their timing requirements. However, this body of
theory generally assumes that resources are
instantaneously pre-emptable and ignores the costs of
systems services. In previous work [1, 2] we provided a
theoretical foundation for including the costs of the
operating system scheduler in the real-time scheduling
framework. In this paper, we apply that theory to the
Real-Time (RT) Mach scheduler. We describe a
methodology for measuring the components of the RT Mach
scheduler in user space. We analyze the predicted
performance of different real-time task sets on the
target system using the scheduling model and the
measured characteristics. We then verify the model
experimentally by measuring the performance of the
real-time task sets, consisting of RT Mach threads, on
the target system. The experimental measurements verify
the analytical model to within a small percentage of
error. Thus, using the model we have successfully
predicted the performance of real-time task sets using
system services, and developed consistent methodologies
to accomplish that prediction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baruah:1993:RHS,
author = "Sanjoy Baruah and Jayant R. Haritsa",
title = "{ROBUST}: a hardware solution to real-time overload",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "207--216",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167010",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "No on-line scheduling algorithm operating in a
uniprocessor environment can guarantee to obtain an
effective processor utilization greater than 25\% under
conditions of overload. This result holds in the most
general case, where incoming tasks may have arbitrary
slack times. We address here the issue of improving
overload performance in environments where the
slack-time characteristics of all incoming tasks
satisfy certain constraints. In particular, we present
a new scheduling algorithm, ROBUST, that efficiently
takes advantage of these task slack constraints to
provide improved overload performance and is
asymptotically optimal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dey:1993:ELP,
author = "Jayanta K. Dey and James F. Kurose and Don Towsley and
C. M. Krishna and Mahesh Girkar",
title = "Efficient on-line processor scheduling for a class of
{IRIS} ({Increasing Reward with Increasing Service})
real-time tasks",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "217--228",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167013",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we consider the problem of on-line
scheduling of real-time tasks which receive a `reward'
that depends on the amount of service received. In our
model, tasks have associated deadlines at which they
must depart the system. The task computations are such
that the longer they are able to execute before their
deadline, the greater the value of their computations,
i.e., the tasks have the property that they receive
{\em increasing reward with increasing service (IRIS)}.
We focus on the problem of scheduling IRIS tasks in a
system in which tasks arrive randomly over time, with
the goal of maximizing the average reward accrued per
task and per unit time. We describe and evaluate a
two-level policy for this system. A top-level algorithm
executes each time a task arrives and determines the
amount of service to allocate to each task in the
absence of future arrivals. A lower-level algorithm, an
earliest deadline first (EDF) policy in our case, is
responsible for the actual selection of tasks to
execute. This two-level policy is evaluated through a
combination of analysis and simulation. We observe that
it provides nearly optimal performance when the
variance in the interarrival times and/or laxities is
low and that the performance is more sensitive to
changes in the arrival process than the deadline
distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Morris:1993:ASS,
author = "Robert J. T. Morris",
title = "Analysis of superposition of streams into a cache
buffer",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "229--235",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167016",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the superposition of address streams into
a cache buffer which is managed according to a Least
Recently Used (LRU) replacement policy. Each of the
streams is characterized by a stack depth distribution,
i.e., the cache hit ratio as a function of the cache
size, if that individual stream were applied to a LRU
cache. We seek the cache hit ratio for each stream,
when the combined stream is applied to a shared LRU
cache. This problem arises in a number of branches of
computer science, particularly in database systems and
processor architecture. We provide two techniques to
solve this problem and demonstrate their effectiveness
using database I/O request streams. The first technique
is extremely simple and relies on an assumption that
the buffer is `well-mixed'. The second technique
relaxes this assumption and provides more accurate
results. We evaluate the performance of the two
techniques on realistic data, both in a lab environment
and a large database installation. We find that the
first simple technique provides accuracy which is
sufficient for most practical purposes. By
investigating sources of error and trying various
improvements in the model we obtain some insight into
the nature of database I/O request streams.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
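As a purely editorial illustration of the stack depth distribution defined in
the abstract above (this is not the paper's code, and the trace and cache
sizes below are invented), the following Python sketch records LRU stack
distances for a single reference stream and reads off the hit ratio as a
function of cache size.

from collections import Counter

def stack_depth_distribution(trace):
    # LRU stack: most recently used item kept at the front.
    stack = []
    depths = Counter()            # stack distance -> number of references
    for item in trace:
        if item in stack:
            depth = stack.index(item) + 1   # 1-based LRU stack distance
            stack.remove(item)
        else:
            depth = float("inf")            # first reference: miss at any size
        depths[depth] += 1
        stack.insert(0, item)
    return depths

def hit_ratio(depths, cache_size):
    # A reference hits an LRU cache of size c iff its stack distance <= c.
    total = sum(depths.values())
    return sum(n for d, n in depths.items() if d <= cache_size) / total

trace = ["a", "b", "a", "c", "b", "a", "d", "a"]    # invented reference stream
depths = stack_depth_distribution(trace)
for size in (1, 2, 3, 4):
    print(size, round(hit_ratio(depths, size), 3))
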
@Article{Tsai:1993:AMC,
author = "Jory Tsai and Anant Agarwal",
title = "Analyzing multiprocessor cache behavior through data
reference modeling",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "236--247",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167021",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper develops a {\em data reference modeling\/}
technique to estimate with high accuracy the cache miss
ratio in cache-coherent multiprocessors. The technique
involves analyzing the dynamic data referencing
behavior of parallel algorithms. Data reference
modeling first identifies different types of shared
data blocks accessed during the execution of a parallel
algorithm, then captures in a few parameters the cache
behavior of each shared block as a function of the
problem size, number of processors, and cache line
size, and finally constructs an analytical expression
for each algorithm to estimate the cache miss ratio.
Because the number of processors, problem size, and
cache line size are included as parameters, the
expression for the cache miss ratio can be used to
predict the performance of systems with different
configurations. Six parallel algorithms are studied,
and the analytical results compared against previously
published simulation results, to establish the
confidence level of the data reference modeling
technique. It is found that the average prediction
error for four out of six algorithms is within five
percent and within ten percent for the other two. The
paper also derives from the model several results on
how cache miss rates scale with system size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Martonosi:1993:ETS,
author = "Margaret Martonosi and Anoop Gupta and Thomas
Anderson",
title = "Effectiveness of trace sampling for performance
debugging tools",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "248--259",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167023",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently there has been a surge of interest in
developing performance debugging tools to help
programmers tune their applications for better memory
performance [2, 4, 10]. These tools vary both in the
detail of feedback provided to the user, and in the
run-time overhead of using them. MemSpy [10] is a
simulation-based tool which gives programmers detailed
statistics on the memory system behavior of
applications. It provides information on the frequency
and causes of cache misses, and presents it in terms of
source-level data and code objects with which the
programmer is familiar. However, using MemSpy increases
a program's execution time by roughly 10 to 40 fold.
This overhead is generally acceptable for applications
with execution times of several minutes or less, but it
can be inconvenient when tuning applications with very
long execution times. This paper examines the use of
trace sampling techniques to reduce the execution time
overhead of tools like MemSpy. When simulating one
tenth of the references, we find that MemSpy's
execution time overhead is improved by a factor of 4 to
6. That is, the execution time when using MemSpy is
generally within a factor of 3 to 8 times the normal
execution time. With this improved performance, we
observe only small errors in the performance statistics
reported by MemSpy. On moderate sized caches of 16KB to
128KB, simulating as few as one tenth of the references
(in samples of 0.5M references each) allows us to
estimate the program's actual cache miss rate with an
absolute error no greater than 0.3\% on our five
benchmarks. These errors are quite tolerable within the
context of performance debugging. With larger caches we
can also obtain good accuracy by using longer sample
lengths. We conclude that, used with care, trace
sampling is a powerful technique that makes possible
performance debugging tools which provide {\em both\/}
detailed memory statistics {\em and\/} low execution
time overheads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ahn:1993:HTS,
author = "Jong-Suk Ahn and Peter B. Danzig and Deborah Estrin
and Brenda Timmerman",
title = "Hybrid technique for simulating high bandwidth delay
computer networks",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "260--261",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167026",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Researchers evaluate and contrast new network routing,
admission control, congestion control and flow control
algorithms through simulation. Analytically derived
arguments justifiably lack credibility because, in the
attempt to model the underlying physical system, the
analyst is forced to make compromising approximations.
However, unlike analytical techniques like Jackson
Queueing Networks, simulations require significant
computation and a simulation's state can consume a
great deal of memory. This paper describes a technique
that we developed to reduce the memory consumption of
communication network simulators. Reduced memory makes
simulations of larger and higher bandwidth-delay
networks possible, but introduces an adjustable degree
of approximation in the simulation. The higher the
memory savings, the less accurate the computed
measures. We call our technique {\em Flowsim}. The
paper motivates the need to simulate computer networks
rather than model them analytically, explains why a
simulator's state can grow quickly, and discusses why
analytical techniques have failed to model modern
communication networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Becker:1993:AIC,
author = "Jeffrey C. Becker and Arvin Park",
title = "An analysis of the information content of address and
data reference streams",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "262--263",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent increases in VLSI processor speed and
transistor density have not been matched by a
proportionate increase in the number of I/O pins used
to communicate information on and off chip. Since the
number of I/O pins is limited by packaging technology
and switching constraints, this trend is likely to
continue, and I/O bandwidth will become the primary
VLSI system performance bottleneck. This paper analyzes
the potential of address and data stream coding in
order to reduce bandwidth requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghandeharizadeh:1993:EAV,
author = "Shahram Ghandeharizadeh and Luis Ramos",
title = "An evaluation of alternative virtual replication
strategies for continuous retrieval of multimedia
data",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "264--265",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167030",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During the past decade, information technology has
evolved to store and retrieve multimedia data (e.g.,
audio, video). Multimedia information systems utilize a
variety of human senses to provide an effective means
of conveying information. Already, these systems play a
major role in educational applications, entertainment
technology, and library information systems. A
challenging task when implementing these systems is to
support a continuous retrieval of an object at the
bandwidth required by its media type. This is
challenging because certain media types, in particular
video, require very high bandwidths. For example, the
bandwidth required by NTSC (the US standard established
by the National Television System Committee) for
`network-quality' video is about 45 megabits per second
(Mbps). Recommendation 601 of the International Radio
Consultative Committee (CCIR) calls for a 216 Mbps
bandwidth for video objects. A video object based on
the HDTV (High Definition Television) quality images
requires approximately a 700 Mbps bandwidth. Compare
these bandwidth requirements with the typical 10 Mbps
bandwidth of a magnetic disk drive, which is not
expected to increase significantly in the near future.
Currently, there are several ways to support continuous
display of these objects: (1) sacrifice the quality of
the data by using either a lossy compression technique
or a low resolution device, (2) employ the aggregate
bandwidth of several disk drives by declustering an
object across multiple disks [2], and (3) use a
combination of these two techniques. Lossy compression
techniques encode data into a form that consumes a
relatively small amount of space; however, when the
data is decoded, it yields a representation similar to
the original (some loss of data). While it is
effective, there are applications that cannot tolerate
loss of data. As an example consider the video signals
collected from space. This data may not be compressed
using a lossy compression technique. Otherwise, the
scientists who later uncompress and analyze the data
run the risk of either observing phenomena that may not
exist due to a slight change in data or missing important
observations due to some loss of data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kay:1993:STI,
author = "Jonathan Kay and Joseph Pasquale",
title = "A summary of {TCP\slash IP} networking software
performance for the {DECstation 5000}",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "266--267",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network software speed is not increasing as rapidly as
that of workstation CPUs. The goal of this study is to
determine how various components of network software
contribute to this bottleneck. In this extended
abstract, we summarize the performance of TCP/IP and
UDP/IP networking software for the DECstation 5000/200
workstations connected by an FDDI LAN.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lewandowski:1993:AAP,
author = "Gary Lewandowski and Anne Condon and Eric Bach",
title = "Asynchronous analysis of parallel dynamic
programming",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "268--269",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167035",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We examine a very simple asynchronous model of
parallel computation that assumes the time to compute a
task is random, following some probability
distribution. The goal of this model is to capture the
effects of unexpected delays on processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shin:1993:ELS,
author = "Kang G. Shin and Chao-Ju Hou",
title = "Evaluation of load sharing in {HARTS} while
considering message routing and broadcasting",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "270--271",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167037",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we apply the load sharing (LS)
mechanism proposed in [1, 2] to HARTS, an experimental
distributed realtime system [3] currently being built
at the Real-Time Computing Laboratory of the University
of Michigan.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Torrellas:1993:BCA,
author = "Josep Torrellas and Andrew Tucker and Anoop Gupta",
title = "Benefits of cache-affinity scheduling in shared-memory
multiprocessors: a summary",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "272--274",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An interesting and common class of workloads for
shared-memory multiprocessors is multiprogrammed
workloads. Because these workloads generally contain
more processes than there are processors in the
machine, there are two factors that increase the number
of cache misses. First, several processes are forced to
time-share the same cache, resulting in one process
displacing the cache state previously built up by a
second one. Consequently, when the second process runs
again, it generates a stream of misses as it rebuilds
its cache state. Second, since an idle processor simply
selects the highest priority runnable process, a given
process often moves from one CPU to another. This
frequent migration results in the process having to
continuously reload its state into new caches,
producing streams of cache misses. To reduce the number
of misses in these workloads, processes should reuse
their cached state more. One way to encourage this is
to schedule each process based on its affinity to
individual caches, that is, based on the amount of
state that the process has accumulated in an individual
cache. This technique is called {\em cache affinity
scheduling}.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
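To make the cache-affinity idea in the abstract above concrete, here is a
minimal editorial sketch in Python, not the authors' policy: an idle CPU
prefers, among the highest-priority runnable processes, one that ran on it
recently enough that its cache state is likely still warm. The process table,
priorities and the affinity window are invented for the example.

import time

class Process:
    def __init__(self, pid, priority):
        self.pid = pid
        self.priority = priority
        self.last_cpu = None      # CPU this process last ran on
        self.last_run = 0.0       # time of its last dispatch

def pick_next(runnable, cpu, affinity_window=0.05):
    # Among the highest-priority runnable processes, prefer one whose
    # cache state on this CPU is probably still warm (it ran there recently).
    best = max(p.priority for p in runnable)
    candidates = [p for p in runnable if p.priority == best]
    warm = [p for p in candidates
            if p.last_cpu == cpu and time.time() - p.last_run < affinity_window]
    chosen = warm[0] if warm else candidates[0]
    chosen.last_cpu, chosen.last_run = cpu, time.time()
    return chosen

procs = [Process(1, 10), Process(2, 10)]
procs[0].last_cpu, procs[0].last_run = 0, time.time()
print(pick_next(procs, cpu=0).pid)   # prefers process 1: its state on CPU 0 is warm
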
@Article{Vetland:1993:CMA,
author = "Vidar Vetland and Peter Hughes and Arne S{\o}lvberg",
title = "A composite modelling approach to software performance
measurement",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "275--276",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167040",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditionally performance modellers have tended to
ignore the difficulty of obtaining parameter values
which represent the resource demands of multi-layered
software. In practice the use of performance
engineering in large-scale systems development is
limited by the cost of acquiring appropriate
performance information about the various software
components. However, if this information can be reused
when components are combined in different ways, then
the cost of measurement can be more easily justified.
Such reuse can be achieved by means of a composite work
model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wagner:1993:AMV,
author = "David B. Wagner",
title = "Approximate mean value analysis of interconnection
networks with deflection routing",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "277--278",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167042",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an Approximate Mean Value Analysis
model of deflection routing in Shuffle-Loop
interconnection networks. (The methodology is readily
extended to other network topologies.) In contrast to
most previous work on deflection routing, the model
makes no assumptions about traffic patterns, nor does
it assume that messages that cannot be admitted to the
network are lost. The technique allows the network to
be modeled in its entirety: all processors, switches,
and memory modules, and their steady-state
interactions, are modeled explicitly. The results of
the model are found to be in close agreement with the
results of simulation experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williamson:1993:OFT,
author = "Carey L. Williamson",
title = "Optimizing file transfer response time using the
loss-load curve congestion control mechanism",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "279--280",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167043",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Loss-load curves are a recently proposed feedback
mechanism for rate-based congestion control in datagram
computer networks. In the loss-load model, packet loss
inside the network is a direct function of sender
transmission rates, and senders choose their own
transmission rate based on the loss-load tradeoff curve
provided by the network. Earlier work [1] has provided
the mathematical basis for the loss-load model and
provided preliminary simulation results demonstrating
its responsiveness, fairness, and stability. The
loss-load model works well for simple network
environments where each source has a large number of
packets to transmit, and wishes to maximize raw
throughput.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lipsky:1993:BRI,
author = "Lester Lipsky",
title = "Book review: {``Introduction to Computer System
Performance Evaluation'' by Krishna Kant (McGraw-Hill,
1992)}",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "7--9",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.1044951",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this book, in the author's own words is
`\ldots{} two-fold. First, it should be usable as a
text for a one or two semester graduate course in the
theory and practice of performance evaluation with
strong emphasis on analytic modeling. Second, it should
be useful as a reference to both researchers and
practitioners in the performance evaluation field'. The
recommended prerequisite courses are `probability
theory, operating systems, and computer architecture.'
If one throws in a course in linear algebra or matrix
theory (how can Markov chains be studied without it?)
then one has the typical undergraduate major (or a good
minor) degree in Computer Science/Engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kinicki:1993:BRT,
author = "Robert E. Kinicki",
title = "Book review: {``Telecommunications and Networking'' by
Udo W. Pooch, Denis Machuel and John McCahn (CRC Press,
1991)}",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "9--10",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.1044952",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is intended is an introduction to
telecommunications. In the preface the authors mention
that one of their goals is to present an overview of
the interaction and relationship between
telecommunications and data processing. Thus the text
is divided into three parts --- basics of
telecommunications, transmission systems, and
networking.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cao:1993:SCM,
author = "Xiren Cao",
title = "Some common misconceptions about performance modeling
and validation",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "11--15",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.174217",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing networks and Markov processes etc. are widely
used in modeling computer systems and communication
networks to study their performance and reliability. To
solve a real world problem, the model developed has to
be validated through measured data. In this paper, we
point out that in validating a model, one has to be
very clear about one's claims regarding what has been
validated; too `accurate' results do not imply a
correct model and usually indicate a validation
problem. We discuss some common misconceptions in
performance modeling and validation. We illustrate our
points through examples. To capture the main concepts,
the problems are simplified in these examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maffeis:1993:CMA,
author = "Silvano Maffeis",
title = "Cache management algorithms for flexible filesystems",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "16--25",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.174219",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cache management in flexible filesystems deals with
the problem of determining a cached file to be replaced
when the local cache space is exhausted. In analogy to
virtual memory management, several different algorithms
exist for managing cached files. In this paper we
simulate the behavior of {\em First-In-First-Out
(FIFO), Least Recently Used (LRU), Least Frequently
Used (LFU)\/} and a variation of LFU we call the {\em
File Length Algorithm (LEN)\/} from the viewpoint of
file access times, cache hit ratios and availability.
The results of several simulation runs are presented
and interpreted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
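As a hedged illustration of the kind of replacement-policy simulation the
abstract above describes (the paper's LEN variant, workload and availability
metrics are not reproduced), the Python sketch below compares FIFO, LRU and
LFU hit ratios on a made-up, mildly skewed file-access trace.

from collections import OrderedDict, Counter
import random

def simulate(trace, capacity, policy):
    cache, freq, hits = OrderedDict(), Counter(), 0
    for f in trace:
        freq[f] += 1              # access counts kept over the whole trace (LFU variant)
        if f in cache:
            hits += 1
            if policy == "LRU":
                cache.move_to_end(f)          # refresh recency on a hit
        else:
            if len(cache) >= capacity:        # evict according to the chosen policy
                if policy == "LFU":
                    victim = min(cache, key=lambda k: freq[k])
                else:                         # FIFO and LRU both drop the oldest entry
                    victim = next(iter(cache))
                del cache[victim]
            cache[f] = True
    return hits / len(trace)

random.seed(0)
files = [f"file{i}" for i in range(50)]
weights = [1.0 / (i + 1) for i in range(50)]            # mildly skewed popularity
trace = random.choices(files, weights=weights, k=5000)  # invented access trace
for policy in ("FIFO", "LRU", "LFU"):
    print(policy, round(simulate(trace, capacity=10, policy=policy), 3))
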
@Article{UI:1993:PMA,
author = "{UNIX International}",
title = "Performance management activities within {UNIX
International}",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "42--42",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.174221",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The primary output of the UNIX International Work
Group on Performance Measurement is a set of
requirements and recommendations to UNIX International
and UNIX System Laboratories for the development of
standard performance measurement interfaces to the UNIX
System. Requirements will be based on the collective,
non-vendor specific needs for a standard performance
architecture. Currently the lack of this standard
causes undue porting and kernel additions by each UNIX
System vendor as well as a great variety of approaches
to gain the same basic performance insight into the
system. Building tools to monitor, display, model, or
predict performance or its trends is a frustrating and
currently single vendor enterprise. By providing
standard data structures, types of performance data
gathered, and a common kernel interface to collect this
data, the whole UNIX system vendor community along with
the UNIX software vendors can develop performance tools
which last more than one UNIX release and work on
multiple UNIX platforms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dujmovic:1994:BRB,
author = "Jozo J. Dujmovi{\'c}",
title = "Book review: {``The Benchmarking Handbook for Database
and Transaction Processing Systems'' Edited by Jim Gray
(Morgan Kaufmann Publishers, Inc., 1991)}",
journal = j-SIGMETRICS,
volume = "21",
number = "3--4",
pages = "4--5",
month = apr,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/181840.1044953",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is a short, complete summary of the most
important approaches to performance measurements of
database systems and transaction processing systems. It
is intended to serve as a tutorial for the novice and a
reference for the professional. Included are
contributions by ten authors: Dina Bitton, Rick
Cattell, David DeWitt, Jim Gray, Neal Nelson, Patrick
O'Neil, Tom Sawyer, Omri Serlin, Carolyn Turbyfill, and
Cyril Orji.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1994:BRE,
author = "David Finkel",
title = "Book review: {``Encyclopedia of Computer Science'',
Third Edition, edited by Anthony Ralston and Edwin I.
Reilly (Van Nostrand Reinhold, 1993)}",
journal = j-SIGMETRICS,
volume = "21",
number = "3--4",
pages = "6--6",
month = apr,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/181840.1044954",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The new edition of the well-regarded {\em Encyclopedia
of Computer Science\/} is truly impressive. It's over
1500 pages long, with over 700 articles. While some
articles just define a term in a few paragraphs, others
are much more extensive: the article on operating
systems is 25 pages long. There's even a twelve-page
section of full-color illustrations, with the expected
pictures of computer graphics, fractals, and scientific
visualization, as well as an unexpected section of
illustrations of postage stamps dealing with
computing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schieber:1994:RRT,
author = "Colleen D. Schieber and Eric E. Johnson",
title = "{RATCHET}: real-time address trace compression
hardware for extended traces",
journal = j-SIGMETRICS,
volume = "21",
number = "3--4",
pages = "22--32",
month = apr,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/181840.181842",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The address traces used in computer architecture
research are commonly generated using software
techniques that introduce time dilations of an order of
magnitude or more. Such techniques may also omit
classes of memory references that are important for
accurate models of computer systems, such as
instruction prefetches, operating system references,
and interrupt activity. We describe a technique for
capturing all classes of references in real time.
RATCHET employs trace filtering hardware to reduce the
bandwidth and storage requirements that have previously
limited the usefulness of hardware-based tracing. In
evaluating this technique using the ten SPEC89
benchmark programs running on a Sun-3/60 workstation,
we found that a small filter cache achieves compression
ratios in the 10--30 range during the startup section
of the programs. Traces from the middle sections of the
C programs achieved compression ratios of 20--30, while
the FORTRAN codes produced ratios of 45--84. Traces
from a smaller ionospheric simulator program were
compressed by factors of 100. These filtered traces
typically represent 36 million contiguous references.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:1994:SCQ,
author = "Surendra M. Gupta and Fikri Karaesmen",
title = "Solution to complex queueing systems: a spreadsheet
approach",
journal = j-SIGMETRICS,
volume = "21",
number = "3--4",
pages = "33--46",
month = apr,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/181840.181843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, some very useful and applicable ideas
are presented to facilitate solving complex problems in
Queueing Theory. It is demonstrated how a spreadsheet
can be used to solve problems which many practitioners
find very intimidating. To this end an algorithm is
presented which is particularly designed for easy
implementation in a spreadsheet. A template is provided
illustrating the implementation of the algorithm. The
use of the template is demonstrated in various queueing
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "queueing systems; spreadsheets",
}
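The abstract above argues that many queueing computations reduce to simple
cell-by-cell recursions that fit a spreadsheet. The paper's own algorithm and
template are not reproduced here; as an editorial sketch of the same flavour
of calculation, the Python fragment below evaluates an M/M/c queue with the
standard Erlang B recursion and Erlang C formula.

def mmc_metrics(arrival_rate, service_rate, servers):
    a = arrival_rate / service_rate        # offered load in Erlangs
    rho = a / servers                      # per-server utilization
    assert rho < 1.0, "queue is unstable"
    # Erlang B recursion: B(0) = 1, B(k) = a*B(k-1) / (k + a*B(k-1))
    b = 1.0
    for k in range(1, servers + 1):
        b = a * b / (k + a * b)
    c = b / (1.0 - rho * (1.0 - b))        # Erlang C: probability an arrival waits
    wq = c / (servers * service_rate - arrival_rate)   # mean wait in queue
    lq = arrival_rate * wq                              # mean queue length (Little's law)
    return {"utilization": rho, "P(wait)": c, "Wq": wq, "Lq": lq}

print(mmc_metrics(arrival_rate=8.0, service_rate=1.0, servers=10))
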
@Article{Denning:1994:FLK,
author = "Peter J. Denning",
title = "The fifteenth level (keynote address)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "1--4",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183020",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peris:1994:AIM,
author = "Vinod G. J. Peris and Mark S. Squillante and Vijay K.
Naik",
title = "Analysis of the impact of memory in distributed
parallel processing systems",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "5--18",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183021",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an important tradeoff between processor
and memory allocation in distributed parallel
processing systems. To study this tradeoff, we
formulate stochastic models of parallel program
behavior, distributed parallel processing environments
and memory overheads incurred by parallel programs as a
function of their processor allocation. A mathematical
analysis of the models is developed, which includes the
effects of contention for shared resources caused by
paging activity. We conduct a detailed analysis of real
large-scale scientific applications and use these
results to parameterize our models. Our results show
that memory overhead resulting from processor
allocation decisions can have a significant effect on
system performance in distributed parallel
environments, strongly suggesting that memory
considerations must be incorporated in the resource
allocation policies for parallel systems. We also
demonstrate the importance of the inter-locality miss
ratio, which is introduced in this paper and analyzed
for the first time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McCann:1994:PAP,
author = "Cathy McCann and John Zahorjan",
title = "Processor allocation policies for message-passing
parallel computers",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "19--32",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183022",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When multiple jobs compete for processing resources on
a parallel computer, the operating system kernel's
processor allocation policy determines how many and
which processors to allocate to each. In this paper we
investigate the issues involved in constructing a
processor allocation policy for large scale,
message-passing parallel computers supporting a
scientific workload. We make four specific
contributions: We define the concept of efficiency
preservation as a characteristic of processor
allocation policies. Efficiency preservation is the
degree to which the decisions of the processor
allocator degrade the processor efficiencies
experienced by individual applications relative to
their efficiencies when run alone. We identify the
interplay between the kernel processor allocation
policy and the application load distribution policy as
a determinant of efficiency preservation. We specify
the details of two families of processor allocation
policies, called Equipartition and Folding. Within each
family, different member policies cover a range of
efficiency preservation values, from very high to very
low. By comparing policies within each family as well
as between families, we show that high efficiency
preservation is essential to good performance, and that
efficiency preservation is a more dominant factor in
obtaining good performance than is equality of resource
allocation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiang:1994:UAC,
author = "Su-Hui Chiang and Rajesh K. Mansharamani and Mary K.
Vernon",
title = "Use of application characteristics and limited
preemption for run-to-completion parallel processor
scheduling policies",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "33--44",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183023",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance potential of run-to-completion (RTC)
parallel processor scheduling policies is investigated
by examining whether (1) application execution rate
characteristics such as average parallelism (avg) and
processor working set (PWS) and/or (2) limited
preemption can be used to improve the performance of
these policies. We address the first question by
comparing policies (previous as well as new) that
differ only in whether or not they use execution rate
characteristics and by examining a wider range of the
workload parameter space than previous studies. We
address the second question by comparing a simple
two-level queueing policy with RTC scheduling in the
second level queue against RTC policies that don't
allow any preemption and against dynamic
equiallocation (EQ). Using simulation to estimate mean
response times we find that for promising RTC policies
such as adaptive static partitioning (ASP) and shortest
demand first (SDF), a maximum allocation constraint
that is for all practical purposes independent of avg
and pws provides greater and more consistent
improvement in policy performance than using avg or
pws. Also, under the assumption that job demand
information is unavailable to the scheduler we show
that the ASP-max policy outperforms all previous high
performance RTC policies for workloads with coefficient
of variation in processing requirement greater than
one. Furthermore, a two-level queue that allows at most
one preemption per job outperforms ASP-max but is not
competitive with EQ.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wolf:1994:SMQ,
author = "Joel L. Wolf and John Turek and Ming-Syan Chen and
Philip S. Yu",
title = "Scheduling multiple queries on a parallel machine",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "45--55",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183024",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has been a good deal of progress made recently
towards the efficient parallelization of individual
phases of single queries in multiprocessor database
systems. In this paper we devise and evaluate a number
of scheduling algorithms designed to handle multiple
parallel queries. One of these algorithms emerges as a
clear winner. This algorithm is hierarchical in nature:
In the first phase, a good quality precedence-based
schedule is created for each individual query and each
possible number of processors. This component employs
dynamic programming. In the second phase, the results
of the first phase are used to create an overall
schedule of the full set of queries. This component is
based on previously published work on
nonprecedence-based malleable scheduling. Even though
the problem we are considering is NP-hard in the strong
sense, the multiple query schedules generated by our
hierarchical algorithm are seen experimentally to
achieve results which are close to optimal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Patel:1994:AMH,
author = "Jignesh M. Patel and Michael J. Carey and Mary K.
Vernon",
title = "Accurate modeling of the hybrid hash join algorithm",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "56--66",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183025",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The join of two relations is an important operation in
database systems. It occurs frequently in relational
queries, and join performance is a significant factor
in overall system performance. Cost models for join
algorithms are used by query optimizers to choose
efficient query execution strategies. This paper
presents an efficient analytical model of an important
join method, the hybrid hash join algorithm, that
captures several key features of the algorithm's
performance --- including its intra-operator
parallelism, interference between disk reads and
writes, caching of disk pages, and placement of data on
disk(s). Validation of the model against a detailed
simulation of a database system shows that the response
time estimates produced by the model are quite
accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
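The cost model in the abstract above targets a parallel, multi-operator setting; for intuition only, the sketch below gives a minimal single-node, textbook-style estimate of hybrid hash join I/O in pages (result writes ignored). The partition-count formula and the fudge factor are common textbook assumptions, not the paper's model, and the example figures are illustrative.

import math

def hybrid_hash_join_io(b_r, b_s, mem_pages, fudge=1.2):
    """Approximate I/O (in pages) of a single-node hybrid hash join.

    b_r, b_s  : pages of the build (smaller) and probe relations
    mem_pages : pages of memory given to the join (assumed > 1)
    fudge     : hash-table space overhead factor

    Textbook-style sketch: read both relations once, write the spilled
    partitions, and read them back; output writes are not counted.
    """
    if fudge * b_r <= mem_pages:
        return b_r + b_s                      # build side fits in memory
    # Number of spilled partitions, each small enough to fit in memory later.
    k = math.ceil((fudge * b_r - mem_pages) / (mem_pages - 1))
    # Fraction of the build (and, proportionally, probe) side joined
    # immediately with the partition that stays memory-resident.
    q = min(1.0, max(0.0, (mem_pages - k) / (fudge * b_r)))
    spilled = (b_r + b_s) * (1.0 - q)
    return (b_r + b_s) + 2.0 * spilled

# Example: a 1,000-page build relation, a 5,000-page probe relation and 200
# pages of memory give roughly 16,000 page I/Os, versus 18,000 for a pure
# GRACE hash join that spills everything.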
@Article{Bittan:1994:APB,
author = "Avi Bittan and Yaakov Kogan and Philip S. Yu",
title = "Asymptotic performance of a buffer model in a data
sharing environment",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "67--76",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183026",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a transaction processing system is
very sensitive to the buffer hit probability. In a data
sharing environment where multiple computing nodes are
coupled together with direct access to shared data on
disks, buffer coherency needs to be maintained such
that if a data granule is updated by a node, the old
copies of this granule present in the buffers of other
nodes must be invalidated. The buffer invalidation
phenomenon reduces the buffer hit probability in a
multi-node environment. After the buffer reaches a
certain size, the buffer hit probability will remain
constant regardless of further increase in buffer size
due to the buffer invalidation effect. This puts an
upper limit on the achievable buffer hit probability.
Thus the selection of appropriate buffer size is one of
the critical issues in a data sharing environment. In
this paper, we develop an asymptotic analysis of the
Markov model for a buffer in the data sharing
environment. Important relations between the buffer size,
number of nodes, write probability, database size, and
the buffer hit probability are found over the full
range of system parameters. A simple expression
is obtained for the maximum achievable buffer hit
probability and also for the maximum usable buffer
size. Various properties of the maximum achievable
buffer hit probability and usable buffer size are
derived for a skewed access workload. The accuracy of
the asymptotic method is validated by numerous case
studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
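The asymptotic Markov analysis in the abstract above is not reproduced here; the short Monte Carlo sketch below only illustrates the invalidation effect it analyzes, under simplifying assumptions (uniform references over the database, LRU buffers, a fixed per-access write probability). All parameter names and values are illustrative.

import random
from collections import OrderedDict

def buffer_hit_probability(nodes=4, granules=10000, buf_size=500,
                           write_prob=0.2, accesses=200000, seed=1):
    """Estimate the buffer hit probability when every update made by one
    node invalidates the copies cached at the other nodes."""
    random.seed(seed)
    buffers = [OrderedDict() for _ in range(nodes)]   # one LRU buffer per node
    hits = 0
    for _ in range(accesses):
        node = random.randrange(nodes)
        g = random.randrange(granules)                # uniform reference
        buf = buffers[node]
        if g in buf:
            hits += 1
            buf.move_to_end(g)                        # refresh LRU position
        else:
            buf[g] = True
            if len(buf) > buf_size:
                buf.popitem(last=False)               # evict the LRU granule
        if random.random() < write_prob:
            for other, b in enumerate(buffers):       # invalidate stale copies
                if other != node:
                    b.pop(g, None)
    return hits / accesses

# Sweeping buf_size shows the saturation the abstract describes: beyond some
# point the hit probability stops improving because invalidations dominate.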
@Article{Petriu:1994:AMV,
author = "Dorina C. Petriu",
title = "Approximate mean value analysis of client-server
systems with multi-class requests",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "77--86",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183027",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic Rendezvous Networks (SRVNs) are performance
models for multitasking parallel software with
intertask communication via rendezvous introduced in
[1], which are well suited to modelling client-server
systems. SRVNs differ from Queueing Networks (QNs) in
two ways: nodes act as both clients and servers
(allowing for nested service), and servers have two
distinct phases of service --- the first one ``in RV''
with the client, and the second ``after RV'', executed
in parallel with the client. Early work on solving SRVN
models has used a kind of approximate Mean Value
Analysis based on heuristic ad hoc assumptions to
determine the task queue properties at the instant of
RV request arrivals. Approximations are necessary since
SRVNs violate product form. Recently, a more rigorous
approach was proposed in [2] for the solution of SRVN
models, based on a special aggregation (named
``Task-Directed Aggregation'' TDA) of the Markov chain
model describing the interference of different clients
that contend for a single server with FIFO queueing
discipline and different service times. The algorithm
derived in [2] has the limitation that each client may
require only a single class of service. In general, a
software server offers a range of services with
different workloads and functionalities, and a client
may need more than one service. The present paper uses
the TDA approach to derive an extended algorithm which
allows a client to require any number of services from
a server by randomly changing the request class. The
new algorithm is incorporated into a decomposition
method for models with any number of servers. The SRVN
modelling technique is applied to a large case study of
a distributed database system, giving insight into the
behaviour of the system and helping to identify
performance problems such as software bottlenecks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balbo:1994:ATP,
author = "G. Balbo and S. C. Bruell and M. Sereno",
title = "Arrival theorems for product-form stochastic {Petri}
nets",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "87--97",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a particular class of Stochastic Petri
Nets whose stationary probabilities at arbitrary
instants exhibit a product form. We study these nets at
specific instants in the steady state that occur
directly after the firing of a transition. We focus our
attention on the instant after tokens are removed from
the places specified by a transition's input bag and
just before tokens are entered into the places
specified by the same transition's output bag. We show
that the stationary probabilities at ``arrival
instants'' are related to corresponding stationary
probabilities at arbitrary instants in net(s) with
lower load. We then show how one of the ``arrival''
theorems can be applied to the derivation of a formula
for the mean sojourn time of a token in a place at
steady state. This is the basis for the development of
a Mean Value Analysis algorithm for the computation of
performance indices for Product-Form Stochastic Petri
Nets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
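For readers unfamiliar with the queueing-network analogue of the result above: the arrival theorem is exactly what makes the classic exact MVA recursion work for closed product-form networks. The sketch below is that classic single-class algorithm for load-independent queueing stations, with illustrative parameters; it is not the Petri-net version derived in the paper.

def mva(visits, service, n_jobs):
    """Exact single-class Mean Value Analysis of a closed product-form
    network of load-independent queueing stations.

    visits[k]  : visit ratio V_k of station k
    service[k] : mean service time S_k at station k
    Returns (throughput, per-station mean queue lengths) at population n_jobs.
    """
    K = len(visits)
    q = [0.0] * K                   # queue lengths with zero customers
    x = 0.0
    for n in range(1, n_jobs + 1):
        # Arrival theorem: an arriving job sees the network with n-1 jobs.
        r = [service[k] * (1.0 + q[k]) for k in range(K)]
        x = n / sum(visits[k] * r[k] for k in range(K))      # throughput
        q = [x * visits[k] * r[k] for k in range(K)]         # Little's law
    return x, q

# Example: a CPU and two disks with 10 circulating jobs.
# throughput, queues = mva([1.0, 0.6, 0.4], [0.020, 0.030, 0.025], 10)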
@Article{Somani:1994:PMS,
author = "Arun K. Somani and Kishor S. Trivedi",
title = "Phased-mission system analysis using {Boolean}
algebraic methods",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "98--107",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183029",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most reliability analysis techniques and tools assume
that a system is used for a mission consisting of a
single phase. However, multiple phases are natural in
many missions. The failure rates of components, system
configuration, and success criteria may vary from phase
to phase. In addition, the duration of a phase may be
deterministic or random. Recently, several researchers
have addressed the problem of reliability analysis of
such systems using a variety of methods. We describe a
new technique for phased-mission system reliability
analysis based on Boolean algebraic methods. Our
technique is computationally efficient and is
applicable to a large class of systems for which the
failure criterion in each phase can be expressed as a
fault tree (or an equivalent representation). Our
technique avoids the state space explosion that commonly
plagues Markov chain-based analysis. We develop a phase
algebra to account for the effects of variable
configurations and success criteria from phase to
phase. Our technique yields exact (as opposed to
approximate) results. We demonstrate the use of our
technique by means of an example and present numerical
results to show the effects of mission phases on the
system reliability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Boolean algebraic methods; fault trees; phased-mission
systems; random phase duration; reconfiguration;
reliability analysis; ultra-reliable computer system;
variable success criteria",
}
@Article{Ebling:1994:SEF,
author = "Maria R. Ebling and M. Satyanarayanan",
title = "{SynRGen}: an extensible file reference generator",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "108--117",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183030",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "SynRGen, a synthetic file reference generator
operating at the system call level, is capable of
modeling a wide variety of usage environments. It
achieves realism through trace-inspired micromodels and
flexibility by combining these micromodels
stochastically. A micromodel is a parameterized piece
of code that captures the distinctive signature of an
application. We have used SynRGen extensively for
stress testing the Coda File System. We have also
performed a controlled experiment that demonstrates
SynRGen's ability to closely emulate real users ---
within 20\% of many key system variables. In this paper
we present the rationale, detailed design, and
evaluation of SynRGen, and mention its applicability to
broader uses such as performance evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raghavan:1994:GNM,
author = "S. V. Raghavan and D. Vasukiammaiyar and Gunter
Haring",
title = "Generative networkload models for a single server
environment",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "118--127",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183031",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Any performance evaluation study requires a concise
description of the workload under which the performance
of the system is to be evaluated. Also, the
repeatability of the experiments for different workload
profiles requires that the workload models generate
the workload profiles parametrically. Such a model
should preferably be time-invariant, consistent and
generative. We view the networkload as a sequence that
can be generated from the rules of a Context Free
Grammar (CFG). Our approach combines the established
practice of viewing the workload as ``consisting of a
hierarchy'' and the CFG description, to produce a
generative networkload model. The networkload model is
applied to a SingleServer--MultipleClients network by
deriving the networkload model parameters from an
operational SingleServer network of personal computers.
The time-invariance and generative nature are verified
experimentally. The usefulness of such a description of
the networkload to study the resource management
problems of a network, like the optimal allocation of
clients to servers, is explored by using the generative
model as input descriptor to a queueing network model
of the SingleServer network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
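The central idea above, treating a networkload as a sequence derivable from Context Free Grammar rules, can be illustrated with a toy probabilistic grammar. The symbols and probabilities below are invented for illustration and are not the paper's workload hierarchy.

import random

GRAMMAR = {
    # nonterminal: list of (expansion, probability) pairs
    "SESSION": [(["login", "WORK", "logout"], 1.0)],
    "WORK":    [(["REQUEST", "WORK"], 0.7), (["REQUEST"], 0.3)],
    "REQUEST": [(["read_small"], 0.6), (["read_large"], 0.2), (["write"], 0.2)],
}

def generate(symbol="SESSION"):
    """Expand a nonterminal into a flat sequence of terminal request names."""
    if symbol not in GRAMMAR:
        return [symbol]                           # terminal symbol
    expansions, weights = zip(*GRAMMAR[symbol])
    chosen = random.choices(expansions, weights=weights)[0]
    seq = []
    for s in chosen:
        seq.extend(generate(s))
    return seq

# generate() might yield ['login', 'read_small', 'write', 'read_small',
# 'logout']; such a sequence would then drive a queueing or simulation model
# of the server, which is the role the paper gives its generative model.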
@Article{Cmelik:1994:SFI,
author = "Bob Cmelik and David Keppel",
title = "{Shade}: a fast instruction-set simulator for
execution profiling",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "128--137",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183032",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Tracing tools are used widely to help analyze, design,
and tune both hardware and software systems. This paper
describes a tool called Shade which combines efficient
instruction-set simulation with a flexible, extensible
trace generation capability. Efficiency is achieved by
dynamically compiling and caching code to simulate and
trace the application program. The user may control the
extent of tracing in a variety of ways; arbitrarily
detailed application state information may be collected
during the simulation, but tracing less translates
directly into greater efficiency. Current Shade
implementations run on SPARC systems and simulate the
SPARC (Versions 8 and 9) and MIPS I instruction sets.
This paper describes the capabilities, design,
implementation, and performance of Shade, and discusses
instruction set emulation in general.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Noble:1994:ESH,
author = "Brian D. Noble and M. Satyanarayanan",
title = "An empirical study of a highly available file system",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "138--149",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present results from a six-month
empirical study of the high availability aspects of the
Coda File System. We report on the service failures
experienced by Coda clients, and show that such
failures are masked successfully. We also explore the
effectiveness and resource costs of key aspects of
server replication and disconnected operation, the two
high availability mechanisms of Coda. Wherever
possible, we compare our measurements to
simulation-based predictions from earlier papers and to
anecdotal evidence from users. Finally, we explore how
users take advantage of the support provided by Coda
for mobile computing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dahlin:1994:QAC,
author = "Michael D. Dahlin and Clifford J. Mather and Randolph
Y. Wang and Thomas E. Anderson and David A. Patterson",
title = "A quantitative analysis of cache policies for scalable
network file systems",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "150--160",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current network file system protocols rely heavily on
a central server to coordinate file activity among
client workstations. This central server can become a
bottleneck that limits scalability for environments
with large numbers of clients. In central server
systems such as NFS and AFS, all client writes, cache
misses, and coherence messages are handled by the
server. To keep up with this workload, expensive server
machines are needed, configured with high-performance
CPUs, memory systems, and I/O channels. Since the
server stores all data, it must be physically capable
of connecting to many disks. This reliance on a central
server also makes current systems inappropriate for
wide area network use where the network bandwidth to
the server may be limited. In this paper, we investigate
the quantitative performance effect of moving as many
of the server responsibilities as possible to client
workstations to reduce the need for high-performance
server machines. We have devised a cache protocol in
which all data reside on clients and all data transfers
proceed directly from client to client. The server is
used only to coordinate these data transfers. This
protocol is being incorporated as part of our
experimental file system, xFS. We present results from
a trace-driven simulation study of the protocol using
traces from a 237-client NFS installation. We find that
the xFS protocol reduces server load by more than a
factor of six compared to AFS without significantly
affecting response time or file availability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kotz:1994:ELS,
author = "David Kotz and Preston Crow",
title = "The expected lifetime of ``single-address-space''
operating systems",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "161--170",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183036",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Trends toward shared-memory programming paradigms,
large (64-bit) address spaces, and memory-mapped files
have led some to propose the use of a single
virtual-address space, shared by all processes and
processors. Typical proposals require the single
address space to contain all process-private data,
shared data, and stored files. To simplify management
of an address space where stable pointers make it
difficult to re-use addresses, some have claimed that a
64-bit address space is sufficiently large that there
is no need to ever re-use addresses. Unfortunately,
there has been no data to either support or refute
these claims, or to aid in the design of appropriate
address-space management policies. In this paper, we
present the results of extensive kernel-level tracing
of the workstations in our department, and discuss the
implications for single-address-space operating
systems. We found that single-address-space systems
will not outgrow the available address space, but only
if reasonable space-allocation policies are used, and
only if the system can adapt as a larger address space
becomes available.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
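A back-of-envelope check (not a number taken from the paper's traces) helps frame the conclusion above: if a system consumed fresh virtual addresses at a sustained 1 GB/s and never reused any, a 64-bit space would last $2^{64} / 2^{30} = 2^{34}$ seconds, roughly 545 years; at a sustained 1 TB/s it would last about $2^{24}$ seconds, under 200 days. Whether reuse can really be avoided therefore depends on how quickly and how wastefully addresses are handed out, which is exactly what the space-allocation policies studied in the paper control.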
@Article{Sivasubramaniam:1994:ASS,
author = "Anand Sivasubramaniam and Aman Singla and Umakishore
Ramachandran and H. Venkateswaran",
title = "An approach to scalability study of shared memory
parallel systems",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "171--180",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The overheads in a parallel system that limit its
scalability need to be identified and separated in
order to enable parallel algorithm design and the
development of parallel machines. Such overheads may be
broadly classified into two components. The first one
is intrinsic to the algorithm and arises due to factors
such as the work-imbalance and the serial fraction. The
second one is due to the interaction between the
algorithm and the architecture and arises due to
latency and contention in the network. A top-down
approach to scalability study of shared memory parallel
systems is proposed in this research. We define the
notion of overhead functions associated with the
different algorithmic and architectural characteristics
to quantify the scalability of parallel systems; we
isolate the algorithmic overhead and the overheads due
to network latency and contention from the overall
execution time of an application; we design and
implement an execution-driven simulation platform that
incorporates these methods for quantifying the overhead
functions; and we use this simulator to study the
scalability characteristics of five applications on
shared memory platforms with different communication
topologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mehra:1994:CTM,
author = "Pankaj Mehra and Catherine H. Schulbach and Jerry C.
Yan",
title = "A comparison of two model-based performance-prediction
techniques for message-passing parallel programs",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "181--190",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183039",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes our experience in modeling two
significant parallel applications: ARC2D, a
2-dimensional Euler solver; and, Xtrid, a tridiagonal
linear solver. Both of these models were expressed in
BDL (Behavior Description language) and simulated on an
iPSC/860 Hypercube modeled using Axe (Abstract
eXecution Environment). BDL models consist of abstract
communicating objects: blocks of sequential code are
modeled by single RUN statements; all communication
operations in the original code are mirrored by
corresponding BDL operations in the model. Our ARC2D
model was built by first profiling the program to
locate the significant loops and then timing the basic
blocks within those loops. Simulated completion times
were (except in one case) within 8\% of measured
execution times. Lengthy simulations were necessary for
predicting the performance of large-scale runs. For
Xtrid, only the loops surrounding communications were
modeled; other loops were absorbed into large
sequential blocks whose complexity was estimated using
statistical regression. This approach yielded a much
smaller model whose computation and communication
complexities were clearly manifest. Analysis of
complexity allowed rapid prediction of large-scale
performance without lengthy simulations! Analytically
predicted speed-ups were within 7\% of those predicted
by simulation. Simulated completion times were within
5\% of measured execution times. The second approach
provides a more effective methodology for
simulation-based performance-tuning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Horton:1994:MLS,
author = "Graham Horton and Scott T. Leutenegger",
title = "A multi-level solution algorithm for steady-state
{Markov} chains",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "191--200",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183040",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new iterative algorithm, the multi-level algorithm,
for the numerical solution of steady state Markov
chains is presented. The method utilizes a set of
recursively coarsened representations of the original
system to achieve accelerated convergence. It is
motivated by multigrid methods, which are widely used
for fast solution of partial differential equations.
Initial results of numerical experiments are reported,
showing significant reductions in computation time,
often an order of magnitude or more, relative to the
Gauss--Seidel and optimal SOR algorithms for a variety
of test problems. It is shown how the well-known
iterative aggregation-disaggregation algorithm of
Takahashi can be interpreted as a special case of the
new method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
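As a point of reference for the Gauss--Seidel and SOR baselines mentioned above, the following is a deliberately naive fixed-point iteration for the stationary distribution of a discrete-time chain. It is illustrative only; the paper's multi-level method and its baselines target much larger chains and converge far faster.

def stationary_distribution(P, tol=1e-12, max_iter=1000000):
    """Stationary distribution of an irreducible, aperiodic discrete-time
    Markov chain with row-stochastic transition matrix P (list of lists),
    computed by repeated multiplication pi <- pi P."""
    n = len(P)
    pi = [1.0 / n] * n
    for _ in range(max_iter):
        new = [sum(pi[i] * P[i][j] for i in range(n)) for j in range(n)]
        s = sum(new)
        new = [x / s for x in new]               # guard against numeric drift
        if max(abs(a - b) for a, b in zip(new, pi)) < tol:
            return new
        pi = new
    return pi

# Example, a 3-state chain:
# print(stationary_distribution([[0.9, 0.1, 0.0],
#                                [0.2, 0.7, 0.1],
#                                [0.0, 0.3, 0.7]]))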
@Article{Das:1994:AMM,
author = "Samir R. Das and Richard M. Fujimoto",
title = "An adaptive memory management protocol for {Time Warp}
parallel simulation",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "201--210",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183041",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is widely believed that Time Warp is prone to two
potential problems: an excessive amount of wasted,
rolled back computation resulting from ``rollback
thrashing'' behaviors, and inefficient use of memory,
leading to poor performance of virtual memory and/or
multiprocessor cache systems. An adaptive mechanism is
proposed based on the Cancelback memory management
protocol that dynamically controls the amount of memory
used in the simulation in order to maximize
performance. The proposed mechanism is adaptive in the
sense that it monitors the execution of the Time Warp
program, automatically adjusts the amount of memory
used to reduce Time Warp overheads (fossil collection,
Cancelback, the amount of rolled back computation,
etc.) to a manageable level. The mechanism is based on
a model that characterizes the behavior of Time Warp
programs in terms of the flow of memory buffers among
different buffer pools. We demonstrate that an
implementation of the adaptive mechanism on a Kendall
Square Research KSR-1 multiprocessor is effective in
automatically maximizing performance while minimizing
memory utilization of Time Warp programs, even for
dynamically changing simulation models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:1994:PEE,
author = "Hui Zhang and Edward W. Knightly",
title = "Providing end-to-end statistical performance
guarantees with bounding interval dependent stochastic
models",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "211--220",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183042",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper demonstrates a new, efficient, and general
approach for providing end-to-end performance
guarantees in integrated services networks. This is
achieved by modeling a traffic source with a family of
bounding interval-dependent (BIND) random variables and
by using a rate-controlled service discipline inside
the network. The traffic model stochastically bounds
the number of bits sent over time intervals of
different length. The model captures different source
behavior over different time scales by making the
bounding distribution an explicit function of the
interval length. The service discipline, RCSP, has the
priority queueing mechanisms necessary to provide
performance guarantees in integrated services networks.
In addition, RCSP provides the means for efficiently
extending the results from a single switch to a network
of arbitrary topology. These techniques are derived
analytically and then demonstrated with numerical
examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
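The BIND model above is stochastic, a family of bounding random variables indexed by interval length. The sketch below checks only the simpler deterministic analogue: for each bounded interval length, the bits sent in any window of that length must stay under the stated limit. Function and parameter names are illustrative.

def conforms_to_interval_bounds(bits_per_slot, bounds):
    """Deterministic analogue of an interval-dependent traffic bound.

    bits_per_slot : bits emitted by the source in each time slot
    bounds        : dict mapping interval length (in slots) to the maximum
                    bits allowed over any window of that length
    Returns True if every window of every bounded length respects its limit.
    """
    n = len(bits_per_slot)
    for length, limit in bounds.items():
        window = sum(bits_per_slot[:length])
        worst = window
        for start in range(1, n - length + 1):
            window += bits_per_slot[start + length - 1] - bits_per_slot[start - 1]
            worst = max(worst, window)
        if worst > limit:
            return False
    return True

# Example: a 1-slot peak of 1500 bits and a 100-slot budget of 60000 bits.
# ok = conforms_to_interval_bounds(trace, {1: 1500, 100: 60000})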
@Article{Pingali:1994:CSI,
author = "Sridhar Pingali and Don Towsley and James F. Kurose",
title = "A comparison of sender-initiated and
receiver-initiated reliable multicast protocols",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "221--230",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183043",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sender-initiated reliable multicast protocols, based
on the use of positive acknowledgments (ACKs), lead to
an ACK implosion problem at the sender as the number of
receivers increases. Briefly, the ACK implosion problem
refers to the significant overhead incurred by the
sending host due to the processing of ACKs from each
receiver. A potential solution to this problem is to
shift the burden of providing reliable data transfer to
the receivers --- thus resulting in a
receiver-initiated multicast error control protocol
based on the use of negative acknowledgments (NAKs). In
this paper we determine the maximum throughputs of the
sending and receiving hosts for generic
sender-initiated and receiver-initiated protocols. We
show that the receiver-initiated error control
protocols provide substantially higher throughputs than
their sender-initiated counterparts. We further
demonstrate that the introduction of random delays
prior to generating NAKs coupled with the multicasting
of NAKs to all receivers has the potential for an
additional substantial increase in the throughput of
receiver-initiated error control protocols over
sender-initiated protocols.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
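The throughput analysis above counts sender and receiver processing per packet; one ingredient of any such comparison is how many multicast transmissions a packet needs before every receiver has it. Under the simplifying assumption of independent per-receiver loss probability p (not the paper's full model), that expectation has a short closed form:

def expected_transmissions(p, receivers, tol=1e-12):
    """Expected number of multicast transmissions until all receivers have a
    packet, with independent loss probability p at each receiver (0 <= p < 1).
    Uses E[M] = sum over m >= 0 of [1 - (1 - p^m)^R], truncated once terms vanish."""
    total, m = 0.0, 0
    while True:
        term = 1.0 - (1.0 - p ** m) ** receivers
        total += term
        if term < tol:
            return total
        m += 1

# Example: with 1% loss and 1000 receivers, about 2.1 transmissions per packet.
# print(expected_transmissions(0.01, 1000))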
@Article{Nikolaidis:1994:TPS,
author = "Ioanis Nikolaidis and Richard Fujimoto and C. Anthony
Cooper",
title = "Time-parallel simulation of cascaded statistical
multiplexers",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "231--240",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183044",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The multiplexing of several lightly loaded links onto
a more heavily loaded output link is a problem of
considerable importance to the design and traffic
engineering of many types of packet-oriented
telecommunications equipment, including that used in
Asynchronous Transfer Mode (ATM) networks. Network
configurations generally require the cascaded operation
of such multiplexers and switches. Important objectives
are to achieve small cell loss ratios while maintaining
efficient utilization of the transmission links. The
small cell loss ratio objective results in extremely
long simulation runs. To address this problem, we
propose a new technique that relies on a compact
description for the arriving/departing traffic at the
multiplexers and a time-parallel scheme without fix-up
phases for effective parallelization. The technique
does not make assumptions about the analytical nature
of the arrival process, thereby allowing trace-driven
simulations to be performed as well. We demonstrate the
method for a number of configurations and traffic
scenarios, and observe that it yields one to two orders
of magnitude speedup on a 32 processor Kendall Square
Research KSR-1 multiprocessor compared to an efficient
cell-level simulation executing on a Sparc-10
workstation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Worthington:1994:SAM,
author = "Bruce L. Worthington and Gregory R. Ganger and Yale N.
Patt",
title = "Scheduling algorithms for modern disk drives",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "241--251",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183045",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Disk subsystem performance can be dramatically
improved by dynamically ordering, or scheduling,
pending requests. Via strongly validated simulation, we
examine the impact of complex logical-to-physical
mappings and large prefetching caches on scheduling
effectiveness. Using both synthetic workloads and
traces captured from six different user environments,
we arrive at three main conclusions: (1) Incorporating
complex mapping information into the scheduler provides
only a marginal (less than 2\%) decrease in response
times for seek-reducing algorithms. (2) Algorithms
which effectively utilize prefetching disk caches
provide significant performance improvements for
workloads with read sequentiality. The cyclical scan
algorithm (C-LOOK), which always schedules requests in
ascending logical order, achieves the highest
performance among seek-reducing algorithms for such
workloads. (3) Algorithms that reduce overall
positioning delays produce the highest performance
provided that they recognize and exploit a prefetching
cache.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
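The C-LOOK rule referred to above can be stated in a few lines; the sketch below captures only the ordering rule (ascending logical addresses from the current position, then wrap), not the authors' validated disk and cache models.

def clook_order(pending, head):
    """Order pending requests (logical block addresses) for C-LOOK service:
    ascending addresses at or past the current position first, then wrap
    around to the lowest remaining addresses."""
    ahead = sorted(a for a in pending if a >= head)
    behind = sorted(a for a in pending if a < head)
    return ahead + behind

# clook_order([95, 10, 300, 42, 180], head=100)  ->  [180, 300, 10, 42, 95]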
@Article{Nicol:1994:OMC,
author = "David M. Nicol and Shahid H. Bokhari",
title = "Optimal multiphase complete exchange on
circuit-switched hypercube architectures",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "252--260",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183046",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complete-exchange communication primitive on a
distributed memory multiprocessor calls for every
processor to send a message to every other processor,
each such message being unique. For circuit-switched
hypercube networks there are two well-known schemes for
implementing this primitive. Direct exchange minimizes
communication volume but maximizes startup costs, while
Standard Exchange minimizes startup costs at the price
of higher communication volume. This paper analyzes a
hybrid, which can be thought of as a sequence of Direct
Exchange phases, applied to variable-sized subcubes.
This paper examines the problem of determining the
optimal subcube dimension sizes $ d_i $ for every
phase. We show that optimal performance is achieved
using some equi-partition, where $ |d_i - d_j| \leq 1 $
for all phases $i$ and $j$. We study the behavior of
the optimal partition as a function of machine
communication parameters, hypercube dimension, and
message size, and show that the optimal partition can
be determined with no more than $ 2 d + 1$ comparisons.
Finally we validate the model empirically, and for
certain problem instances observe as much as a factor
of two improvement over the other methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
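Given the result quoted above, that some equi-partition of the cube dimension is optimal, a scheduler only has to compare one candidate per phase count. The sketch below enumerates those candidates; the cost function is deliberately left as a caller-supplied placeholder, since the paper's communication model is not reproduced here.

def equi_partitions(d):
    """One equi-partition of hypercube dimension d into k phases for each
    k = 1..d: phase dimensions are floor(d/k) or ceil(d/k) and sum to d."""
    candidates = []
    for k in range(1, d + 1):
        base, extra = divmod(d, k)
        candidates.append([base + 1] * extra + [base] * (k - extra))
    return candidates

def best_partition(d, phase_cost):
    """Return the cheapest equi-partition under a caller-supplied model;
    phase_cost(dims) should give the total exchange time for phases of the
    given subcube dimensions (a placeholder, not the paper's expression)."""
    return min(equi_partitions(d), key=phase_cost)

# For d = 6 the candidates are [6], [3,3], [2,2,2], [2,2,1,1], [2,1,1,1,1]
# and [1,1,1,1,1,1]; picking among them needs only a handful of comparisons,
# consistent with the 2d + 1 bound quoted in the abstract.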
@Article{Temam:1994:CIP,
author = "O. Temam and C. Fricker and W. Jalby",
title = "Cache interference phenomena",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "261--271",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183047",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The impact of cache interferences on program
performance (particularly numerical codes, which
heavily use the memory hierarchy) remains unknown. The
general knowledge is that cache interferences are
highly irregular, in terms of occurrence and intensity.
In this paper, the different types of cache
interferences that can occur in numerical loop nests
are identified. An analytical method is developed for
detecting the occurrence of interferences and, more
important, for computing the number of cache misses due
to interferences. Simulations and experiments on real
machines show that the model is generally accurate and
that most interference phenomena are captured.
Experiments also show that cache interferences can be
intense and frequent. Certain parameters such as array
base addresses or dimensions can have a strong impact
on the occurrence of interferences. Modifying these
parameters only can induce global execution time
variations of 30\% and more. Applications of these
modeling techniques are numerous and range from
performance evaluation and prediction to enhancement of
data locality optimizations techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache interferences or conflicts; data locality;
modeling; numerical codes; performance evaluation",
}
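The sensitivity to array base addresses described above is easy to reproduce with a toy direct-mapped cache. The following sketch counts misses when a loop alternately touches A[i] and B[i]; the cache geometry and addresses are illustrative, and the paper's analytical model is not reproduced.

def misses(base_a, base_b, n, elem=8, cache_bytes=32768, line=32):
    """Misses in a direct-mapped cache for the access pattern
    A[0], B[0], A[1], B[1], ..., A[n-1], B[n-1]."""
    sets = cache_bytes // line
    tags = [None] * sets
    count = 0
    for i in range(n):
        for addr in (base_a + i * elem, base_b + i * elem):
            s = (addr // line) % sets
            tag = addr // (line * sets)
            if tags[s] != tag:
                count += 1
                tags[s] = tag
    return count

# With base_b = base_a + cache_bytes the two arrays map to the same sets and
# every access misses; offsetting one base by a single cache line removes the
# ping-ponging and leaves only the compulsory misses, the kind of base-address
# effect behind the execution-time swings of 30% or more cited in the abstract.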
@Article{Danskin:1994:PXP,
author = "John Danskin and Pat Hanrahan",
title = "Profiling the {X} protocol (extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "272--273",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183048",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Drapeau:1994:TWC,
author = "Ann L. Drapeau and David A. Patterson and Randy H.
Katz",
title = "Toward workload characterization of video server and
digital library applications (extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "274--275",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183049",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gill:1994:CSF,
author = "Deepinder S. Gill and Songnian Zhou and Harjinder S.
Sandhu",
title = "A case study of file system workload in a large-scale
distributed environment",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "276--277",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183050",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerstein:1994:CTD,
author = "Joseph L. Hellerstein",
title = "A comparison of techniques for diagnosing performance
problems in information systems (extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "278--279",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183051",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1994:EUL,
author = "J. William Lee",
title = "Efficient user-level communication on multicomputers
with an optimistic flow-control protocol (extended
abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "280--281",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183052",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rolia:1994:MRP,
author = "J. A. Rolia and M. Starkey and G. Boersma",
title = "Modeling {RPC} performance",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "282--283",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183053",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed computing applications are collections of
processes allocated across a network that cooperate to
accomplish common goals. The applications require the
support of a distributed computing runtime environment
that provides services to help manage process
concurrency and interprocess communication. This
support helps to hide much of the inherent complexity
of distributed environments via industry standard
interfaces and permits developers to create more
portable applications. The resource requirements of the
runtime services can be significant and may impact
application performance and system throughput. This
paper describes work done to study the potential
benefits of redesigning some aspects of the DCE RPC and
its current implementation on a specific platform.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tayyab:1994:SPM,
author = "Athar B. Tayyab and Jon G. Kuhl",
title = "Stochastic performance models of parallel task systems
(extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "284--285",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183054",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers the class of parallel
computations represented by directed, acyclic task
graphs. These include parallel loops, multiphase
algorithms, partitioning and merging algorithms, as
well as any arbitrary parallel computation that can be
structured by a task graph. The paper reviews the
current state of the art in stochastic bound models of
parallel programs and presents new stochastic bound
performance models that predict the expected execution
time of parallel programs on a given shared-memory
multiprocessor system; and provide qualitative and
quantitative description of the relationships between
the structure of parallel programs, computation and
synchronization behavior of the program, and
architectural features of the underlying multiprocessor
system. The models use a new formulation based on
stochastic bound analysis and are solvable for a number
of distribution functions. They are applicable to
shared-memory multiprocessors with significantly
different architectural and synchronization performance
characteristics. The accuracy of the models is
validated via several measurements on two different
shared-memory multiprocessor systems, the Alliant
FX/2800 and the Encore Multimax. The results show the
models to be quite accurate, even when some of the
modeling assumptions are violated. The maximum error of
prediction ranges from about 10\% to under 1\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Uhlig:1994:KBM,
author = "Richard Uhlig and David Nagle and Trevor Mudge and
Stuart Sechrest",
title = "Kernel-based memory simulation (extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "286--287",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183056",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wabnig:1994:PPP,
author = "Harald Wabnig and G{\"u}nter Haring",
title = "Performance prediction of parallel systems with
scalable specifications --- methodology and case
study",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "288--289",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lavenberg:1995:SPS,
author = "S. S. Lavenberg",
title = "Selected publications of the {Systems Analysis and
Systems Applications} department of the {IBM T. J.
Watson Research Center}",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "6--17",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202101",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shanley:1995:TDM,
author = "Kim Shanley and Tracy Derossett",
title = "{TPC-D} measures how quickly real-world business
questions can be answered",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "18--45",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202102",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wabnig:1995:PPP,
author = "H. Wabnig and G. Haring",
title = "Performance prediction of parallel systems with
scalable specifications --- methodology and case
study",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "46--62",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202103",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the general methodology of
specifying parallel systems within the PAPS
(Performance Analysis of Parallel Systems) toolset and
presents a case study that shows the applicability and
accuracy of the Petri net based performance prediction
tools contained in the toolset. Parallel systems are
specified in the PAPS toolset by separately defining
the program workload, the hardware resources, and the
mapping of the program to the hardware. The resource
parameterization is described in detail for a
multiprocessor computer with a store {\&} forward
communication network. The Gaussian elimination
algorithm is taken as a workload example to demonstrate
how regularly structured parallel algorithms are
modelled with acyclic task graphs. Three different
program specifications with various levels of model
accuracy are developed and their parameterization is
described. The predicted execution time is compared
with the measured execution times of the real program
on the parallel hardware. It is shown that the Petri
net based performance prediction tools provide accurate
performance predictions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:1995:QMS,
author = "Surendra M. Gupta",
title = "Queueing model with state dependent balking and
reneging: its complementary and equivalence",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "63--72",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202104",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, the concepts of complementarity and
equivalence between an {\em M/M/c/K\/} queueing model
with state dependent balking and reneging and a machine
interference problem with warm standbys are formalized.
The relationship provides insight into these queueing
systems. Through a series of corollaries, relationships
between various queueing systems are derived. It is
shown that a recently reported relationship between
the Erlang loss system and a finite source queueing system
is a trivial consequence of the more general results
presented here. New results involving the arrival point
probabilities and measures of performance for these two
queueing systems are also presented. An example is also
provided.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
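The M/M/c/K model with state-dependent balking and reneging discussed above is a birth-death chain, so its steady-state probabilities follow from the usual product formula. The solver below is only a generic illustration of that model (the paper's contribution is the complementarity and equivalence results, not this computation), and the example parameters are invented.

def mmck_balking_reneging(lam, mu, c, K, balk, renege_rate):
    """Steady-state probabilities p_0..p_K of an M/M/c/K queue in which an
    arrival finding n customers joins with probability balk(n) and each
    waiting customer reneges at rate renege_rate."""
    unnorm = [1.0]
    for n in range(K):
        birth = lam * balk(n)                                    # state n -> n+1
        death = min(n + 1, c) * mu + max(n + 1 - c, 0) * renege_rate
        unnorm.append(unnorm[-1] * birth / death)                # p_{n+1} / p_0
    total = sum(unnorm)
    return [p / total for p in unnorm]

# Example: 3 servers, capacity 10, joining probability decaying with the
# queue length seen on arrival.
# p = mmck_balking_reneging(lam=5.0, mu=2.0, c=3, K=10,
#                           balk=lambda n: max(0.0, 1.0 - n / 10.0),
#                           renege_rate=0.5)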
@Article{Keehn:1995:VPF,
author = "D. G. Keehn",
title = "Visualizing performance in the frequency plane",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "73--81",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202105",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A method of showing the performance limiting effects
of a product form queueing network as lines, planes,
etc in a $J$ dimensional space is given. The location
of a certain critical point (Little's Law Point) in
this space allows the asymptotic calculation of the
normalizing constant G(K) of the network. This Little's
Law point (LLP) is found by applying Little's Law to
the augmented system generating function of the BCMP
[1] network. The computational complexity of this
algorithm is $O(C^3 M)$, where $C$ is the number of chains
and $M$ is the number of service centers in the system. Comparisons of
numerical accuracy with other methods (Convolution, and
another asymptotic method) are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
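The Convolution baseline that the abstract compares against is Buzen's algorithm; for the simplest case (single class, load-independent stations) it fits in a few lines, shown below with illustrative parameters. The paper's Little's Law Point asymptotics and the multi-chain case are not reproduced here.

def convolution_G(rho, N):
    """Buzen's convolution algorithm: normalizing constants G(0..N) of a
    closed single-class product-form network with load-independent stations,
    where rho[k] = V_k * S_k is the relative utilization of station k."""
    g = [1.0] + [0.0] * N              # G_0(n): only the empty-network term
    for r in rho:
        for n in range(1, N + 1):
            g[n] += r * g[n - 1]       # G_k(n) = G_{k-1}(n) + rho_k * G_k(n-1)
    return g

# System throughput at population n is G(n-1)/G(n):
# g = convolution_G([0.5, 0.9, 0.3], 8); throughput = g[7] / g[8]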
@Article{Chapin:1995:MSP,
author = "John Chapin and A. Herrod and Mendel Rosenblum and
Anoop Gupta",
title = "Memory system performance of {UNIX} on {CC-NUMA}
multiprocessors",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "1--13",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223588",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This study characterizes the performance of a variant
of UNIX SVR4 on a large shared-memory multiprocessor
and analyzes the effects of possible OS and
architectural changes. We use a nonintrusive cache miss
monitor to trace the execution of an OS-intensive
multiprogrammed workload on the Stanford DASH, a 32-CPU
CC-NUMA multiprocessor (CC-NUMA multiprocessors have
cache-coherent shared memory that is physically
distributed across the machine). We find that our
version of UNIX accounts for 24\% of the workload's
total execution time. A surprisingly large fraction of
OS time (79\%) is spent on memory system stalls,
divided equally between instruction and data cache miss
time. In analyzing techniques to reduce instruction
cache miss stall time, we find that replication of only
7\% of the OS code would allow 80\% of instruction
cache misses to be serviced locally on a CC-NUMA
machine. For data cache misses, we find that a small
number of routines account for 96\% of OS data cache
stall time. We find that most of these misses are
coherence (communication) misses, and larger caches
will not necessarily help. After presenting detailed
performance data, we analyze the benefits of several OS
changes and predict the effects of altering the cache
configuration, degree of clustering, and cache
coherence mechanism of the machine. (This paper is
available via \url{http://wwwflash.stanford.edu}.)",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bedichek:1995:TFA,
author = "Robert C. Bedichek",
title = "{Talisman}: fast and accurate multicomputer
simulation",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "14--24",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223589",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Talisman is a simulator that models the execution
semantics and timing of a multicomputer. Talisman is
unique in combining high semantic accuracy, high timing
accuracy, portability, {\em and\/} good performance.
This good performance allows users to run significant
programs on large simulated multicomputers. The
combination of high accuracy and good performance
yields an ideal tool for evaluating architectural
trade-offs. Talisman models the semantics of virtual
memory, a circuit-switched internode interconnect, I/O
devices, and instruction execution in both user and
supervisor modes. It also models the timing of
processor pipelines, caches, local memory buses, and a
circuit-switched interconnect. Talisman executes the
same program binary images as a hardware prototype at a
cost of about 100 host instructions per simulated
instruction. On a suite of accuracy benchmarks run on
the hardware and the simulator, Talisman and the
prototype differ in reported running times by only a
few percent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:1995:RDV,
author = "Leana Golubchik and John C. S. Lui and Richard Muntz",
title = "Reducing {I/O} demand in video-on-demand storage
servers",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "25--36",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223590",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent technological advances have made multimedia
on-demand services, such as home entertainment and
home-shopping, important to the consumer market. One of
the most challenging aspects of this type of service is
providing access either instantaneously or within a
small and reasonable latency upon request. In this
paper, we discuss a novel approach, termed adaptive
piggybacking, which can be used to provide on-demand or
nearly-on-demand service and at the same time reduce
the I/O demand on the multimedia storage server.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghandeharizadeh:1995:CSD,
author = "Shahram Ghandeharizadeh and Seon Ho Kim and Cyrus
Shahabi",
title = "On configuring a single disk continuous media server",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "37--46",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223591",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The past decade has witnessed a proliferation of
repositories that store and retrieve continuous media
data types, e.g., audio and video objects. These
repositories are expected to play a major role in
several emerging applications, e.g., library
information systems, educational applications,
entertainment industry, etc. To support the display of
a video object, the system partitions each object into
fixed size blocks. All blocks of an object reside
permanently on the disk drive. When displaying an
object, the system stages the blocks of the object into
memory one at a time for immediate display. In the
presence of multiple displays referencing different
objects, the bandwidth of the disk drive is multiplexed
among requests, introducing disk seeks. Disk seeks
reduce the useful utilization of the disk bandwidth and
result in a lower number of simultaneous displays
(throughput). This paper characterizes the impact of
disk seeks on the throughput of the system. It
describes REBECA as a mechanism that maximizes the
throughput of the system by minimizing the time
attributed to each incurred seek. A limitation of
REBECA is that it increases the latency observed by
each request. We quantify this throughput vs latency
tradeoff of REBECA and develop an efficient technique
that computes its configuration parameters to realize
the performance requirements (desired latency and
throughput) of an application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krunz:1995:TMC,
author = "Marwan Krunz and Herman Hughes",
title = "A traffic for {MPEG}-coded {VBR} streams",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "47--55",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223592",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Compression of digital video is the only viable means
to transport real-time full-motion video over BISDN/ATM
networks. Traffic streams generated by video
compressors exhibit complicated patterns which vary
from one compression scheme to another. In this paper
we investigate the traffic characteristics of video
streams which are compressed based on the MPEG
standard. Our study is based on 23 minutes of video
obtained from an entertainment movie. A particular
significance of our data is that it contains all types
of coded frames, namely: Intra-coded (I), Prediction
(P), and Bidirectional (B) MPEG frames. We describe the
statistical behavior of the VBR stream using histograms
and autocorrelation functions. A procedure is developed
to determine the instants of a scene change based on
the changes in the size of successive $I$ frames. It is
found that the length of a scene can be modeled by a
geometric distribution. A model for an MPEG traffic
source is developed in which frames are generated
according to the compression pattern of the captured
video stream. For each frame type, the number of cells
per frame is fitted by a lognormal distribution whose
parameters are determined by the frame type. The
appropriateness and limitations of the model are
examined by studying the multiplexing performance of
MPEG streams. Simulations of an ATM multiplexer are
conducted, in which traffic sources are derived from
the measured VBR trace as well as the proposed model.
The queueing performance in both cases is found to be
relatively close.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williamson:1995:NTM,
author = "Carey L. Williamson",
title = "Network traffic measurement and modeling",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "56--57",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223593",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network traffic measurement and workload
characterization are key steps in the workload modeling
process. Much has been learned through network
measurement and workload modeling in the last ten
years, but new challenges are now at the forefront:
measuring network traffic in the Internet environment,
understanding the implications of network traffic
structure (e.g., self-similarity, autocorrelation, long
range dependence), and accurate modeling of network
traffic workloads for high speed network environments.
This `hot topic' session brings together three
prominent speakers to address each of these topics, in
turn.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:1995:GNN,
author = "Erol Gelenbe",
title = "{G}-networks: new queueing models with additional
control capabilities",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "58--59",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.376966",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This Hot-Topics Session on G-Networks aims at bringing
these relatively new models, which we introduced for the
first time in 1989 and 1990, to the attention of the
performance evaluation and modeling community. The
session includes presentations by Peter Harrison, Onno
Boxma, Jean-Michel Fourneau and myself. We will cover
the basic concepts, some examples of potential
applications, as well as recent research efforts in
this area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tridandapani:1995:FPF,
author = "Srini Tridandapani and Anton T. Dahbura and Charles U.
Martel and John Matthews and Arun K. Somani",
title = "Free performance and fault tolerance (extended
abstract): using system idle capacity efficiently",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "60--61",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223594",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Malony:1995:DIE,
author = "Allen D. Malony",
title = "Data interpretation and experiment planning in
performance tools",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "62--63",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223595",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The parallel scientific computing community is placing
increasing emphasis on portability and scalability of
programs, languages, and architectures. This creates
new challenges for developers of parallel performance
analysis tools, who will have to deal with increasing
volumes of performance data drawn from diverse
platforms. One way to meet this challenge is to
incorporate sophisticated facilities for data
interpretation and experiment planning within the tools
themselves, giving them increased flexibility and
autonomy in gathering and selecting performance data.
This panel discussion brings together four research
groups that have made advances in this direction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vaidya:1995:CTL,
author = "Nitin H. Vaidya",
title = "A case for two-level distributed recovery schemes",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "64--73",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223596",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most distributed and multiprocessor recovery schemes
proposed in the literature are designed to tolerate
an arbitrary number of failures. In this paper, we
demonstrate that it is often advantageous to use
`two-level' recovery schemes. A {\em two-level\/}
recovery scheme tolerates the {\em more probable\/}
failures with low performance overhead, while the less
probable failures may be tolerated with a higher
overhead. By minimizing the overhead for the more
frequently occurring failure scenarios, our approach is
expected to achieve lower performance overhead (on
average) as compared to existing recovery schemes. To
demonstrate the advantages of two-level recovery, we
evaluate the performance of a recovery scheme that
takes two different types of checkpoints, namely,
1-checkpoints and $N$-checkpoints. A single failure can
be tolerated by rolling the system back to a
1-checkpoint, while multiple failure recovery is
possible by rolling back to an $N$-checkpoint. For such
a system, we demonstrate that to minimize the average
overhead, it is often necessary to take {\em both\/}
1-checkpoints and $N$-checkpoints. While the
conclusions of this paper are intuitive, the work on
design of appropriate recovery schemes is lacking. The
objective of this paper is to motivate research into
recovery schemes that can provide multiple levels of
fault tolerance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Epema:1995:ADU,
author = "D. H. J. Epema",
title = "An analysis of decay-usage scheduling in
multiprocessors",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "74--85",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223597",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Priority-aging or decay-usage scheduling is a
time-sharing scheduling policy capable of dealing with
a workload of both interactive and batch jobs by
decreasing the priority of a job when it acquires CPU
time, and by increasing its priority when it does not
use the CPU. In this paper we deal with a
decay-usage scheduling policy in multiprocessor systems
modeled after widely used systems. The priority of a
job consists of a base priority and a time-dependent
part based on processor usage. Because the priorities
in our model are time dependent, a queueing-theoretic
analysis, for instance for the mean response time,
seems impossible. Still, it turns out that as a
consequence of the scheduling policy, the shares of
available CPU time obtained by jobs converge, and a
deterministic analysis for these shares is feasible:
for a fixed set of jobs with very large (infinite)
processing demands, we derive the relation between
their base priorities and their steady-state shares. In
addition, we analyze the relation between the values of
the parameters of the scheduler and the level of
control it can exercise over the steady-state shares.
We validate the model by simulations and by
measurements of actual systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elwalid:1995:FRP,
author = "Anwar Elwalid and Daniel Heyman and T. V. Lakshman and
Debasis Mitra and Alan Weiss",
title = "Fundamental results on the performance of {ATM}
multiplexers with applications to video
teleconferencing",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "86--97",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223598",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The main contributions of this paper are two-fold.
First, we prove fundamental, similarly behaving lower
and upper bounds, and give an approximation based on
the bounds, which is effective for analyzing ATM
multiplexers, even when the traffic has many, possibly
heterogeneous, sources and their models are of high
dimension. Second, we apply our analytic approximation
to statistical models of video teleconference traffic,
obtain the multiplexing system's capacity as determined
by the number of admissible sources for given cell loss
probability, buffer size and trunk bandwidth, and,
finally, compare with results from simulations, which
are driven by actual data from coders. The results are
surprisingly close. Our bounds are based on Large
Deviations theory. Our approximation has two easily
calculated parameters, one is from Chernoff's theorem
and the other is the system's dominant eigenvalue. A
broad range of systems are analyzed and the time for
analysis in each case is a fraction of a second.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Knightly:1995:FLT,
author = "Edward W. Knightly and Dallas E. Wrege and J{\"o}rg
Liebeherr and Hui Zhang",
title = "Fundamental limits and tradeoffs of providing
deterministic guarantees to {VBR} video traffic",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "98--107",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223599",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Compressed digital video is one of the most important
traffic types in future integrated services networks.
However, a network service that supports
delay-sensitive video imposes many problems since
compressed video sources are variable bit rate (VBR)
with a high degree of burstiness. In this paper, we
consider a network service that can provide
deterministic guarantees on the minimum throughput and
the maximum delay of VBR video traffic. A common belief
is that due to the burstiness of VBR traffic, such a
service will not be efficient and will necessarily
result in low network utilization. We investigate the
fundamental limits and tradeoffs in providing
deterministic performance guarantees to video and use a
set of 10 to 90 minute long MPEG-compressed video
traces for evaluation. Contrary to conventional wisdom,
we are able to show that, in many cases, a
deterministic service can be provided to video traffic
while maintaining a reasonable level of network
utilization. We first consider an ideal network
environment that employs the most accurate
deterministic, time-invariant video traffic
characterizations, Earliest-Deadline-First packet
schedulers, and exact admission control conditions. The
utilization achievable in this situation provides the
fundamental limits of a deterministic service. We then
investigate the utilization limits in a network
environment that takes into account practical
constraints, such as the need for fast policing
mechanisms, simple packet scheduling algorithms, and
efficient admission control tests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fang:1995:EBW,
author = "Youjian Fang and Michael Devetsikiotis and Ioannis
Lambadaris and A. Roger Kaye",
title = "Exponential bounds for the waiting time distribution
in {Markovian} queues, with applications to {TES/GI/1}
systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "108--115",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223600",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several services to be supported by emerging
high-speed networks are expected to result in highly
{\em bursty\/} (autocorrelated) traffic streams. A
typical example is variable bit-rate (VBR) compressed
video. Therefore, traffic modeling and performance
evaluation techniques geared towards autocorrelated
streams are extremely important for the design of
practical networks. The {\em TES\/} (Transform ---
Expand --- Sample) technique has emerged as a general
methodology for modeling autocorrelated random
processes with arbitrary marginal distributions.
Because of their generality and practical
applicability, TES models can be readily used to
accurately characterize bursty traffic streams in ATM
networks. Although TES models can be easily implemented
for simulation studies, the need still exists for {\em
analytical\/} results on the performance of queueing
systems driven by autocorrelated traffic. Of particular
interest are the tails of the waiting time distribution
in queues driven by TES-modeled bursty traffic. Such
tail probabilities, when they become exceedingly small,
may be difficult to obtain via conventional simulation.
In order to extend existing results, based on Large
Deviations theory, to TES processes, the main
difficulty is posed by the continuous state-space of
the TES time-series. In this paper, we develop a
general result concerning exponential bounds for the
waiting time under {\em continuous state-space\/}
Markov arrivals. We apply this result to {\em TES/GI/1\/}
queues, show numerical examples, and compare our
bound with simulation results. Accurate estimates of
extremely low probabilities are obtained by employing
fast simulation techniques based on {\em importance
sampling.\/}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borst:1995:OPA,
author = "S. C. Borst",
title = "Optimal probabilistic allocation of customer types to
servers",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "116--125",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223601",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The model under consideration consists of $n$ customer
types attended by $m$ parallel non-identical servers.
Customers are allocated to the servers in a
probabilistic manner; upon arrival customers are sent
to one of the servers according to an $ m \times n$
matrix of routing probabilities. We consider the
problem of finding an allocation that minimizes a
weighted sum of the mean waiting times. We expose the
structure of an optimal allocation and describe for
some special cases in detail how the structure may be
exploited in actually determining an optimal
allocation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Matta:1995:ZIS,
author = "Ibrahim Matta and A. Udaya Shankar",
title = "{Z}-iteration: a simple method for throughput
estimation in time-dependent multi-class systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "126--135",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223602",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multiple-class multiple-resource (MCMR) systems, where
each class of customers requires a particular set of
resources, are common. These systems are often analyzed
under steady-state conditions. We describe a simple
method, referred to as {\em Z-iteration}, to estimate
both transient and steady-state performances of such
systems. The method makes use of results and techniques
available from queueing theory, network analysis,
dynamic flow theory, and numerical analysis. We show
the generality of the Z-iteration by applying it to an
ATM network, a parallel disk system, and a distributed
batch system. Validations against discrete-event
simulations show the accuracy and computational
advantages of the Z-iteration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1995:SRL,
author = "Peter M. Chen and Edward K. Lee",
title = "Striping in a {RAID} level 5 disk array",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "136--145",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223603",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Redundant disk arrays are an increasingly popular way
to improve I/O system performance. Past research has
studied how to stripe data in non-redundant (RAID Level
0) disk arrays, but none has yet been done on how to
stripe data in redundant disk arrays such as RAID Level
5, or on how the choice of striping unit varies with
the number of disks. Using synthetic workloads, we
derive simple design rules for striping data in RAID
Level 5 disk arrays given varying amounts of workload
information. We then validate the synthetically derived
design rules using real workload traces to show that
the design rules apply well to real systems. We find no
difference in the optimal striping units for RAID Level
0 and 5 for read-intensive workloads. For
write-intensive workloads, in contrast, the overhead of
maintaining parity causes full-stripe writes (writes
that span the entire error-correction group) to be more
efficient than read-modify writes or reconstruct
writes. This additional factor causes the optimal
striping unit for RAID Level 5 to be four times smaller
for write-intensive workloads than for read-intensive
workloads. We next investigate how the optimal striping
unit varies with the number of disks in an array. We
find that the optimal striping unit for reads in a RAID
Level 5 varies {\em inversely\/} to the number of
disks, but that the optimal striping unit for writes
varies {\em with\/} the number of disks. Overall, we
find that the optimal striping unit for workloads with
an unspecified mix of reads and writes is {\em
independent\/} of the number of disks. Together, these
trends lead us to recommend (in the absence of specific
workload information) that the striping unit over a
wide range of RAID Level 5 disk array sizes be equal to
1/2 * average positioning time * disk transfer rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Worthington:1995:LES,
author = "Bruce L. Worthington and Gregory R. Ganger and Yale N.
Patt and John Wilkes",
title = "On-line extraction of {SCSI} disk drive parameters",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "146--156",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223604",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sophisticated disk scheduling algorithms require
accurate, detailed disk drive specifications, including
data about mechanical delays, on-board caching and
prefetching algorithms, command and protocol overheads,
and logical-to-physical block mappings. Comprehensive
disk models used in storage subsystem design require
similar levels of detail. We describe a suite of
general-purpose algorithms and techniques for acquiring
the necessary information from a SCSI disk drive. Using
only the ANSI-standard interface, we demonstrate how
the important parameter values of a modern SCSI drive
can be determined accurately and efficiently.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wolf:1995:DDD,
author = "Joel L. Wolf and Philip S. Yu and Hadas Shachnai",
title = "{DASD} dancing: a disk load balancing optimization
scheme for video-on-demand computer systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "157--166",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223605",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For a video-on-demand computer system we propose a
scheme which balances the load on the disks, thereby
helping to solve a performance problem crucial to
achieving maximal video throughput. Our load balancing
scheme consists of two stages. The static stage
determines good assignments of videos to groups of
striped disks. The dynamic stage uses these
assignments, and features a DASD dancing algorithm
which performs real-time disk scheduling in an
effective manner. Our scheme works synergistically with
disk striping. We examine the performance of the DASD
dancing algorithm via simulation experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sandhu:1995:ASD,
author = "Harjinder S. Sandhu and Kenneth C. Sevcik",
title = "An analytic study of dynamic hardware and software
cache coherence strategies",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "167--177",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223606",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic software cache coherence strategies use
information about program sharing behaviour to manage
caches at run-time and at a granularity defined by the
application. The program-level information is obtained
through annotations placed into the application by the
user or the compiler. The coherence protocols may range
from simple static algorithms to dynamic algorithms
that use run-time data structures similar to the
directories used in hardware strategies. In this paper,
we present an analytic study of five dynamic software
cache coherence algorithms and compare these to a
representative hardware coherence strategy. The
analytic model is constructed using four input
parameters --- write probability, locality,
granularity, and system size --- and solved by analysis
of a Markov chain. We show that the fundamental
tradeoffs between the different hardware and software
strategies are captured in this model. The results of
the study show that hardware schemes perform better for
fine-grained data structures for much of the parameter
space that we study. However, for coarse-grained data
structures, various software algorithms are dominant
over most of the parameter space. Further, hardware
strategies are found to be more susceptible to the
effects of contention, and also perform worse for the
asymmetric workload that we study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brorsson:1995:SPT,
author = "Mats Brorsson",
title = "{SM-prof}: a tool to visualise and find cache
coherence performance bottlenecks in multiprocessor
programs",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "178--187",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223607",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cache misses due to coherence actions are often the
major source for performance degradation in cache
coherent multiprocessors. It is often difficult for the
programmer to take cache coherence into account when
writing the program since the resulting access pattern
is not apparent until the program is executed. SM-prof
is a performance analysis tool that addresses this
problem by visualising the shared data access pattern
in a diagram with links to the source code lines
causing performance degrading access patterns. The
execution of a program is divided into time slots and
each data block is classified based on the accesses
made to the block during a time slot. This enables the
programmer to follow the execution over time and it is
possible to track the exact position responsible for
accesses causing many cache misses related to coherence
actions. Matrix multiplication and the MP3D application
from SPLASH are used to illustrate the use of SM-prof.
For MP3D, SM-prof revealed performance limitations that
resulted in a performance improvement of over 75\%. The
current implementation is based on program-driven
simulation in order to achieve non-intrusive profiling.
If a small perturbation of the program execution is
acceptable, it is also possible to use software tracing
techniques given that a data address can be related to
the originating instruction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cao:1995:SIP,
author = "Pei Cao and Edward W. Felten and Anna R. Karlin and
Kai Li",
title = "A study of integrated prefetching and caching
strategies",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "188--197",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223608",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Prefetching and caching are effective techniques for
improving the performance of file systems, but they
have not been studied in an integrated fashion. This
paper proposes four properties that optimal integrated
strategies for prefetching and caching must satisfy,
and then presents and studies two such integrated
strategies, called {\em aggressive\/} and {\em
conservative.\/} We prove that the performance of the
{\em conservative\/} approach is within a factor of two
of optimal and that the performance of the {\em
aggressive\/} strategy is a factor significantly less
than twice that of the optimal case. We have evaluated
these two approaches by trace-driven simulation with a
collection of file access traces. Our results show that
the two integrated prefetching and caching strategies
are indeed close to optimal and that these strategies
can reduce the running time of applications by up to
50\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sivasubramaniam:1995:CBR,
author = "Anand Sivasubramaniam and Aman Singla and Umakishore
Ramachandran and H. Venkateswaran",
title = "On characterizing bandwidth requirements of parallel
applications",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "198--207",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223609",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Synthesizing architectural requirements from an
application viewpoint can help in making important
architectural design decisions towards building large
scale parallel machines. In this paper, we quantify the
link bandwidth requirement on a binary hypercube
topology for a set of five parallel applications. We
use an execution-driven simulator called SPASM to
collect data points for system sizes that are feasible
to be simulated. These data points are then used in a
regression analysis for projecting the link bandwidth
requirements for larger systems. The requirements are
projected as a function of the following system
parameters: number of processors, CPU clock speed, and
problem size. These results are also used to project
the link bandwidths for other network topologies. Our
study quantifies the link bandwidth that has to be made
available to limit the network overhead in an
application to a specified tolerance level. The results
show that typical link bandwidths (200-300 MBytes/sec)
found in current commercial parallel architectures
(such as Intel Paragon and Cray T3D) would have fairly
low network overhead for the applications considered in
this study. For two of the applications, this overhead
is negligible. For the other applications, this
overhead can be limited to about 30\% of the execution
time provided the problem sizes are increased
commensurate with the processor clock speed. The
technique presented can be useful to a system architect
to synthesize the bandwidth requirements for realizing
well-balanced parallel architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McCann:1995:SMC,
author = "Cathy McCann and John Zahorjan",
title = "Scheduling memory constrained jobs on distributed
memory parallel computers",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "208--219",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223610",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of multiprocessor scheduling
of jobs whose memory requirements place lower bounds on
the fraction of the machine required in order to
execute. We address three primary questions in this
work: 1. How can a parallel machine be multiprogrammed
with minimal overhead when jobs have minimum memory
requirements? 2. To what extent does the inability of an
application to repartition its workload during runtime
affect the choice of processor allocation policy? 3. How
rigid should the system be in attempting to provide
equal resource allocation to each runnable job in order
to minimize average response time? This work is
applicable both to parallel machines and to networks of
workstations supporting parallel applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lebeck:1995:AMN,
author = "Alvin R. Lebeck and David A. Wood",
title = "Active memory: a new abstraction for memory-system
simulation",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "220--230",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223611",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the {\em active memory\/}
abstraction for memory-system simulation. In this
abstraction---designed specifically for on-the-fly
simulation---memory references logically invoke a
user-specified function depending upon the reference's
type and accessed memory block state. Active memory
allows simulator writers to specify the appropriate
action on each reference, including `no action' for the
common case of cache hits. Because the abstraction
hides implementation details, implementations can be
carefully tuned for particular platforms, permitting
much more efficient on-the-fly simulation than the
traditional trace-driven abstraction. Our SPARC
implementation, {\em Fast-Cache}, executes simple data
cache simulations two or three times faster than a
highly-tuned trace-driven simulator and only 2 to 7
times slower than the original program. Fast-Cache
implements active memory by performing a fast table
look up of the memory block state, taking as few as 3
cycles on a SuperSPARC for the no-action case. Modeling
the effects of Fast-Cache's additional lookup
instructions qualitatively shows that Fast-Cache is
likely to be the most efficient simulator for miss
ratios between 3\% and 40\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouzaeSilva:1995:CTD,
author = "Edmundo {de Souza e Silva} and H. Richard Gail and
Reinaldo {Vallejos Campos}",
title = "Calculating transient distributions of cumulative
reward",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "231--240",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223612",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markov reward models have been employed to obtain
performability measures of computer and communication
systems. In these models, a continuous time Markov
chain is used to represent changes in the system
structure, usually caused by faults and repairs of its
components, and reward rates are assigned to states of
the model to indicate some measure of accomplishment at
each structure. A procedure to calculate numerically
the distribution of the reward accumulated over a
finite observation period is presented. The development
is based solely on probabilistic arguments, and the
final recursion is quite simple. The algorithm has a
low computational cost in terms of model parameters. In
fact, the number of operations is linear in a parameter
that is smaller than the number of rewards, while the
storage required is independent of the number of
rewards. We also consider the calculation of the
distribution of cumulative reward for models in which
impulse based rewards are associated with
transitions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carrasco:1995:RRT,
author = "Juan A. Carrasco and Angel Calder{\'o}n",
title = "Regenerative randomization: theory and application
examples",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "241--252",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223613",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Randomization is a popular method for the transient
solution of continuous-time Markov models. Its primary
advantages over other methods (i.e., ODE solvers) are
robustness and ease of implementation. It is however
well-known that the performance of the method
deteriorates with the `stiffness' of the model: the
number of required steps to solve the model up to time
$t$ tends to $ \Lambda t $ for $ \Lambda t \rightarrow
\infty $. In this paper we present a new
method called regenerative randomization and apply it
to the computation of two transient measures for
rewarded irreducible Markov models. Regarding the
number of steps required in regenerative randomization
we prove that: (1) it is smaller than the number of
steps required in standard randomization when the
initial distribution is concentrated in a single state,
(2) for $ \Lambda t \rightarrow \infty $, it is upper
bounded by a function $ O(\log (\Lambda t /
\epsilon))$, where $ \epsilon $ is the desired relative
approximation error bound. Using dependability and
performability examples we analyze the performance of
the method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Greenberg:1995:CTA,
author = "Albert G. Greenberg and R. Srikant",
title = "Computational techniques for accurate performance
evaluation of multirate, multihop communication
networks",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "253--260",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223614",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computational techniques are presented for
connection-level performance evaluation of
communication networks, with stochastic multirate
traffic, state dependent admission control, alternate
routing, and general topology --- all characteristics
of emerging integrated service networks. The techniques
involve solutions of systems of fixed point equations,
which estimate equilibrium network behavior. Though
similar techniques have been applied with success to
single-rate fully connected networks, the curse of
dimensionality arises when the techniques are extended
to multirate, multihop networks, and the cost of
solving the fixed point equations exactly is
exponential. This exponential barrier is skirted by
exploiting, in particular, a close relationship with
the network reliability problem, and by borrowing
effective heuristics from the reliability domain. A
series of experiments is reported on, comparing the
estimates from the new techniques to the results of
discrete event simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ott:1995:IET,
author = "Teun Ott",
title = "The {Internet} in evolution, and {TCP} over {ATM}
(panel session)",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "261--262",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223615",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Trivedi:1995:NMP,
author = "Kishor S. Trivedi and A. Bobbio and G. Ciardo and R.
German and A. Puliafito and M. Telek",
title = "Non-{Markovian} {Petri} nets",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "263--264",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223616",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Non-Markovian models allow us to capture a very wide
range of circumstances in which it is necessary to
model phenomena whose times to occurrence are not
exponentially distributed. Events such as timeouts in a
protocol, service times at a machine performing the
same task on each part, and memory access or
instruction execution in a low-level hardware or software
model have durations which are constant or have very low
variance. Phase-type distributions can be used to
approximate a non-exponential distribution, but they increase the
size of the state space. The analysis of stochastic
systems with non-exponential timing is of increasing
interest in the literature and requires the development
of suitable modeling tools. Recently, some effort has
been devoted to generalize the concept of {\em
Stochastic Petri Nets\/} (SPN), by allowing the firing
times to be generally distributed. A particular case of
non-Markovian {\em SPN}, is the class of {\em
Deterministic and SPN (DSPN)\/} [1]. A {\em DSPN\/} is
a non-Markovian {\em SPN\/} where, in each marking, at
most one transition is allowed to have a deterministic
firing time with enabling memory policy. A new class of
stochastic Petri nets has recently been defined [2, 3]
by generalizing the deterministic firing times of the
DSPN to generally distributed firing times. The
underlying stochastic process for these classes of
Petri nets is a {\em Markov Regenerative Process\/}
(MRGP). This observation has opened a very fertile line
of research aimed at the definition of solvable classes
of models whose underlying marking process is an MRGP,
and therefore referred to as {\em Markov Regenerative
Stochastic Petri Nets (MRSPN).\/} Some of the results
in this field will be described in the session. In
particular, Ciardo investigates stochastic confusion by
defining the selection probability for transitions
attempting to fire at the same time. German introduces
the `method of supplementary variables' for the
derivation of state equations describing the transient
behavior of the marking process. Puliafito describes
how, under some constraints, concurrent enabling of
several generally distributed timed transitions is
allowed. Bobbio and Telek discuss how age memory policy
can be included to capture preemptive mechanisms of the
resume {\em (prs)\/} type.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Erramilli:1995:PIS,
author = "Ashok Erramilli",
title = "Performance impacts of self-similarity in traffic",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "265--266",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223617",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent measurement studies in Bellcore and elsewhere
have convincingly established the presence of
statistical self similarity in high-speed network
traffic. What is less clear --- and as such the subject
of intense current research --- is the impact of the
self-similarity on network performance. Given that
traditional queueing models of network performance do
not model self-similarity, the validity of traditional
models to predict network performance would be
supported if it is shown that self-similarity does not
have measurable impacts on performance. On the other
hand, if the converse of this assertion were true, it
would have significant impacts on the way networks are
designed and analyzed, as well as open up new areas of
research in mathematical modeling, queueing analysis,
network design and control. The issues addressed in
this session are therefore of fundamental importance in
high-speed network research. Given that queueing
behavior is dominated by traffic characteristics over
the time scales of busy periods, it has been argued
that phenomena that span many time scales, such as
self-similarity, should not be relevant for queueing
performance. However, the paper by Narayan, Erramilli
and Willinger presents evidence that for data traffic,
the long range dependence (which is related to the
self-similarity in traffic) can dominate queueing
behavior under a variety of conditions. Specifically,
it is shown, based on a series of carefully designed
simulation experiments with actual traffic traces, that
the queueing behavior with actual traces is
considerably heavier than that predicted by traditional
theory, and that these differences are attributable to
long range dependence. The paper by Heyman and Lakshman
investigates modeling of video traffic to predict cell
loss performance with finite buffer systems, and they
conclude that long-range dependence is not a crucial
property in determining the finite buffer behavior of
video conferences. In particular, a Markov chain model
that does not model long-range dependence is
nevertheless able to reproduce various operating
characteristics over a wide range of loadings obtained
with the actual video trace. Mukherjee, Adas, Klivansky
and Song investigate the performance impacts of
short-range and long-range correlation components using
simulations with a fractional ARIMA model. They also
discuss a strategy to provide quality of service
guarantees with long range dependent traffic, as well
as recent results on NSFNET traffic. Finally, the paper
by Li describes a frequency-domain based analytical
tool that matches a special class of Markov chains with
traces exhibiting a variety of characteristics,
including long-range dependence. Good agreement is
reported between analytical queueing solutions of the
matched Markov chains and simulation results obtained
with video and data traffic traces. This session therefore
brings together a wide range of viewpoints on this
issue. Resolution of such seemingly conflicting
conclusions lies in the fact that in performance
analysis, answers sensitively depend on the specific
details of a problem. Thus the proper question to ask
is not whether or not self-similarity matters in
queueing, but under what conditions it matters.
Likewise, the question to ask is not whether a class of
models is invalid, but to identify the conditions under
which traditional Markov or self-similar traffic models
are expected to be valid. Finally, given an
understanding of statistical features that are relevant
to a given problem, the challenge is to model these
accurately and parsimoniously so that the model is
useful in practical performance analysis. The work
outlined in the abstracts below adds significantly to
our understanding of these issues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arpaci:1995:IPS,
author = "Remzi H. Arpaci and Andrea C. Dusseau and Amin M.
Vahdat and Lok T. Liu and Thomas E. Anderson and David
A. Patterson",
title = "The interaction of parallel and sequential workloads
on a network of workstations",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "267--278",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223618",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper examines the plausibility of using a
network of workstations (NOW) for a mixture of parallel
and sequential jobs. Through simulations, our study
examines issues that arise when combining these two
workloads on a single platform. Starting from a
dedicated NOW just for parallel programs, we
incrementally relax uniprogramming restrictions until
we have a multi-programmed, multi-user NOW for both
interactive sequential users and parallel programs. We
show that a number of issues associated with the
distributed NOW environment (e.g., daemon activity,
coscheduling skew) can have a small but noticeable
effect on parallel program performance. We also find
that efficient migration to idle workstations is
necessary to maintain acceptable parallel application
performance. Furthermore, we present a methodology for
deriving an optimal delay time for recruiting idle
machines for use by parallel programs; this {\em
recruitment threshold\/} was just 3 minutes for the
research cluster we measured. Finally, we quantify the
effects of the additional parallel load upon
interactive users by keeping track of the potential
number of {\em user delays\/} in our simulations. When
we limit the maximum number of delays per user, we can
still maintain acceptable parallel program performance.
In summary, we find that for our workloads a 2:1 rule
applies: a NOW cluster of approximately 60 machines can
sustain a 32-node parallel workload in addition to the
sequential load placed upon it by interactive users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Myllymaki:1995:DTJ,
author = "Jussi Myllymaki and Miron Livny",
title = "Disk-tape joins: synchronizing disk and tape access",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "279--290",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223619",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today large amounts of data are stored on tertiary
storage media such as magnetic tapes and optical disks.
DBMSs typically operate only on magnetic disks since
they know how to maneuver disks and how to optimize
accesses on them. Tertiary devices present a problem
for DBMSs since these devices have dismountable media
and have very different operational characteristics
compared to magnetic disks. For instance, most tape
drives offer very high capacity at low cost but are
accessed sequentially, involve lengthy latencies, and
deliver lower bandwidth. Typically, the scope of a
DBMS's query optimizer does not include tertiary
devices, and the DBMS might not even know how to
control and operate upon tertiary-resident data. In a
three-level hierarchy of storage devices (main memory,
disk, tape), the typical solution is to elevate
tape-resident data to disk devices, thus bringing such
data into the DBMS' control, and then to perform the
required operations on disk. This requires additional
space on disk and may not give the lowest response time
possible. With this challenge in mind, we studied the
trade-offs between memory and disk requirements and the
execution time of a join with the help of two
well-known join methods. The conventional, disk-based
Nested Block Join and Hybrid Hash Join were modified to
operate directly on tapes. An experimental
implementation of the modified algorithms gave us more
insight into how the algorithms perform in practice.
Our performance analysis shows that a DBMS desiring to
operate on tertiary storage will benefit from special
algorithms that operate directly on tape-resident data
and take into account and exploit the mismatch in disk
and tape characteristics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "concurrent I/O; join methods; tertiary storage",
}
@Article{Phalke:1995:IRG,
author = "Vidyadhar Phalke and Bhaskarpillai Gopinath",
title = "An inter-reference gap model for temporal locality in
program behavior",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "291--300",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223620",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The property of locality in program behavior has been
studied and modelled extensively because of its
application to memory design, code optimization,
multiprogramming etc. We propose a $k$ order Markov
chain based scheme to model the sequence of time
intervals between successive references to the same
address in memory during program execution. Each unique
address in a program is modelled separately. To
validate our model, which we call the Inter-Reference
Gap (IRG) model, we show substantial improvements in
three different areas where it is applied. (1) We
improve upon the miss ratio for the Least Recently Used
(LRU) memory replacement algorithm by up to 37\%. (2)
We achieve up to 22\% space-time product improvement
over the Working Set (WS) algorithm for dynamic memory
management. (3) A new trace compression technique is
proposed which compresses up to 2.5\% with zero error
in WS simulations and up to 3.7\% error in the LRU
simulations. All these results are obtained
experimentally, via trace driven simulations over a
wide range of cache traces, page reference traces,
object traces and database traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dynamic memory management; locality of reference;
Markov chains; memory replacement; prediction; trace
compaction; trace driven simulation",
}
@Article{Braams:1995:BCP,
author = "Jan Braams",
title = "Batch class process scheduler for {Unix SVR4}",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "301--302",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223621",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Donatelli:1995:SSR,
author = "S. Donatelli and G. Franceschinis",
title = "State space reductions using stochastic well-formed
net simplifications: an application to random polling
systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "303--304",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223622",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balsamo:1995:ART,
author = "S. Balsamo and I. Mura",
title = "Approximate response time distribution in {Fork} and
{Join} systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "305--306",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223623",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:1995:SEA,
author = "Xiaodong Zhang and Zhichen Xu",
title = "A semi-empirical approach to scalability study",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "307--308",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223624",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hughes:1995:PFP,
author = "Eric Hughes and Marianne Winslett",
title = "{PEDCAD}: a framework for performance evaluation of
object database applications",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "309--310",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223625",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salehi:1995:SCA,
author = "James D. Salehi and James F. Kurose and Don Towsley",
title = "Scheduling for cache affinity in parallelized
communication protocols",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "311--312",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223626",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We explore processor-cache affinity scheduling of
parallel network protocol processing in a setting in
which protocol processing executes on a shared-memory
multiprocessor concurrently with a general workload of
non-protocol activity. We find that affinity scheduling
can significantly reduce the communication delay
associated with protocol processing, enabling the host
to support a greater number of concurrent streams and
to provide a higher maximum throughput to individual
streams. In addition, we compare implementations of two
parallelization approaches ({\em Locking\/} and {\em
Independent Protocol Stacks\/}) with very different
caching behaviors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chatterjee:1995:MAM,
author = "Amit K. Chatterjee and Vijay K. Konangi",
title = "Modeling and analysis of multi channel asymmetric
packet switch modules in a bursty and nonuniform
traffic environment",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "313--314",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223627",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:1995:TNT,
author = "Gautam Shah and Umakishore Ramachandran and Richard
Fujimoto",
title = "{Timepatch}: a novel technique for the parallel
simulation of multiprocessor caches",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "315--316",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223628",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sundaram:1995:FAB,
author = "C. R. M. Sundaram and Derek L. Eager",
title = "Future applicability of bus-based shared memory
multiprocessors",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "317--318",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223629",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciardo:1995:MFC,
author = "Gianfranco Ciardo and Ludmila Cherkasova and Vadim
Kotov and Tomas Rokicki",
title = "Modeling a {Fibre Channel} switch with stochastic
{Petri} nets",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "319--320",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223630",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arunachalam:1995:PPP,
author = "Meenakshi Arunachalam and Alok Choudhary",
title = "A prefetching prototype for the parallel file systems
on the {Paragon}",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "321--322",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223631",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gopalakrishnan:1996:BRT,
author = "R. Gopalakrishnan and Gurudatta M. Parulkar",
title = "Bringing real-time scheduling theory and practice
closer for multimedia computing",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "1--12",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233017",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper seeks to bridge the gap between theory and
practice of real-time scheduling in the domain of high
speed multimedia networking. We show that the strict
preemptive nature of real-time scheduling leads to more
context switching, and requires system calls for
concurrency control. We present our scheduling scheme
called rate-monotonic with delayed preemption (rmdp)
and show how it reduces both these overheads. We
then develop the analytical framework to analyze rmdp
and other scheduling schemes that lie in the region
between strict (immediate) preemption and no
preemption. Our {\em idealized scheduler simulation\/}
methodology accounts for the blocking introduced by
these schemes under the usual assumption that the time
for context switching and preemption is zero. We derive
simpler schedulability tests for non-preemptive
scheduling, and prove a variant of rate-monotonic
scheduling that has fewer preemptions. Our measurements
on Sparc and Pentium platforms show that, for the
workloads we considered, rmdp increases useful
utilization by as much as 8\%. Thus our scheduling
policies have the potential to improve performance over
existing methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:1996:EPL,
author = "Mor Harchol-Balter and Allen B. Downey",
title = "Exploiting process lifetime distributions for dynamic
load balancing",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "13--24",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233019",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We measure the distribution of lifetimes for UNIX
processes and propose a functional form that fits this
distribution well. We use this functional form to
derive a policy for preemptive migration, and then use
a trace-driven simulator to compare our proposed policy
with other preemptive migration policies, and with a
non-preemptive load balancing strategy. We find that,
contrary to previous reports, the performance benefits
of preemptive migration are significantly greater than
those of non-preemptive migration, even when the
memory-transfer cost is high. Using a model of
migration costs representative of current systems, we
find that preemptive migration reduces the mean delay
(queueing and migration) by 35--50\%, compared to
non-preemptive migration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dusseau:1996:EDS,
author = "Andrea C. Dusseau and Remzi H. Arpaci and David E.
Culler",
title = "Effective distributed scheduling of parallel
workloads",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "25--36",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233020",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a distributed algorithm for time-sharing
parallel workloads that is competitive with
coscheduling. {\em Implicit scheduling\/} allows each
local scheduler in the system to make independent
decisions that dynamically coordinate the scheduling of
cooperating processes across processors. Of particular
importance is the blocking algorithm which decides the
action of a process waiting for a communication or
synchronization event to complete. Through simulation
of bulk-synchronous parallel applications, we find that
a simple two-phase fixed-spin blocking algorithm
performs well; a two-phase adaptive algorithm that
gathers run-time data on barrier wait-times performs
slightly better. Our results hold for a range of
machine parameters and parallel program
characteristics. These findings are in direct contrast
to the literature that states explicit coscheduling is
necessary for fine-grained programs. We show that the
choice of the local scheduler is crucial, with a
priority-based scheduler performing two to three times
better than a round-robin scheduler. Overall, we find
that the performance of implicit scheduling is near
that of coscheduling ($\pm$35\%), without the
requirement of explicit, global coordination.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lim:1996:LPB,
author = "Beng-Hong Lim and Ricardo Bianchini",
title = "Limits on the performance benefits of multithreading
and prefetching",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "37--46",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233021",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents new analytical models of the
performance benefits of multithreading and prefetching,
and experimental measurements of parallel applications
on the MIT Alewife multiprocessor. For the first time,
both techniques are evaluated on a real machine as
opposed to simulations. The models determine the region
in the parameter space where the techniques are most
effective, while the measurements determine the region
where the applications lie. We find that these regions
do not always overlap significantly. The multithreading
model shows that only 2--4 contexts are necessary to
maximize this technique's potential benefit in current
multiprocessors. Multithreading improves execution time
by less than 10\% for most of the applications that we
examined. The model also shows that multithreading can
significantly improve the performance of the same
applications in multiprocessors with longer latencies.
Reducing context-switch overhead is not crucial. The
software prefetching model shows that allowing 4
outstanding prefetches is sufficient to achieve most of
this technique's potential benefit on current
multiprocessors. Prefetching improves performance over
a wide range of parameters, and improves execution time
by as much as 20--50\% even on current multiprocessors.
The two models show that prefetching has a significant
advantage over multithreading for machines with low
memory latencies and/or applications with high cache
miss rates because a prefetch instruction consumes less
time than a context-switch.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dinda:1996:FMA,
author = "Peter A. Dinda and David R. O'Hallaron",
title = "Fast message assembly using compact address
relations",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "47--56",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233022",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Message assembly and disassembly represent a
significant fraction of total communication time in
many parallel systems. We introduce a run-time approach
for fast message assembly and disassembly. The approach
is based on generating addresses by decoding a
precomputed and compactly stored address relation that
describes the mapping of addresses on the source node
to addresses on the destination node. The main result
is that relations induced by redistributions of regular
block-cyclic distributed arrays can be encoded in an
extremely compact form that facilitates high throughput
message assembly and disassembly. We measure the
throughput of decoding-based message assembly and
disassembly on several systems and find performance on
par with copy throughput.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Parsons:1996:CAM,
author = "Eric W. Parsons and Kenneth C. Sevcik",
title = "Coordinated allocation of memory and processors in
multiprocessors",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "57--67",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233023",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An important issue in multiprogrammed multiprocessor
systems is the scheduling of parallel jobs. Most
research in the area has focussed solely on the
allocation of processors to jobs. However, since memory
is also a critical resource for many parallel jobs, the
allocation of memory and processors must be coordinated
to allow the system to operate most effectively. To
understand how to design such coordinated scheduling
disciplines, it is important to have a theoretical
foundation. To this end, we develop bounds on the
achievable system throughput when both memory and
processing time are in demand. We then propose and
simulate a simple discipline and relate its performance
to the throughput bounds. An important result of our
work concerns the situation in which the workload speedup
is convex (from above), but the speedup characteristics
of individual jobs are unknown. It shows that an
equi-allocation strategy for processors can achieve
near-maximum throughput, yet offer good mean response
times, when both memory and processors are
considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Witchel:1996:EFF,
author = "Emmett Witchel and Mendel Rosenblum",
title = "{Embra}: fast and flexible machine simulation",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "68--79",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233025",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes Embra, a simulator for the
processors, caches, and memory systems of uniprocessors
and cache-coherent multiprocessors. When running as
part of the SimOS simulation environment, Embra models
the processors of a MIPS R3000/R4000 machine faithfully
enough to run a commercial operating system and
arbitrary user applications. To achieve high simulation
speed, Embra uses dynamic binary translation to
generate code sequences which simulate the workload. It
is the first machine simulator to use this technique.
Embra can simulate real workloads such as multiprocess
compiles and the SPEC92 benchmarks running on Silicon
Graphics' IRIX 5.3 at speeds only 3 to 9 times slower
than native execution of the workload, making Embra the
fastest reported complete machine simulator. Dynamic
binary translation also gives Embra the flexibility to
dynamically control both the simulation statistics
reported and the simulation model accuracy with low
performance overheads. For example, Embra can customize
its generated code to include a processor cache model
which allows it to compute the cache misses and memory
stall time of a workload. Customized code generation
allows Embra to simulate a machine with caches at
slowdowns of only a factor of 7 to 20. Most of the
statistics generated at this speed match those produced
by a slower reference simulator to within 1\%. This
paper describes the techniques used by Embra to achieve
high performance, focusing on the requirements unique
to machine simulation, including modeling the
processor, memory management unit, and caches. In order
to study Embra's memory system performance we use the
SimOS simulation system to examine Embra itself. We
present a detailed breakdown of Embra's memory system
performance for two cache hierarchies to understand
Embra's current performance and to show that Embra's
implementation techniques benefit significantly from
the larger cache hierarchies that are becoming
available. Embra has been used for operating system
development and testing as well as for studies of
computer architecture. In this capacity it has
simulated large, commercial workloads including IRIX
running a relational database system and a CAD system
for billions of simulated machine cycles.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "virtual machine",
}
@Article{Brakmo:1996:ENS,
author = "Lawrence S. Brakmo and Larry L. Peterson",
title = "Experiences with network simulation",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "80--90",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233027",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Simulation is a critical tool in developing, testing,
and evaluating network protocols and architectures.
This paper describes $x$-Sim, a network simulator based
on the $x$-kernel, that is able to fully simulate the
topologies and traffic patterns of large scale
networks. It also illustrates the capabilities and
usefulness of the simulator with case studies. Finally,
based on our experiences using $x$-Sim, we identify a
set of principles (guidelines) for network simulation,
and present concrete examples that quantify the value
of these principles, along with the cost of ignoring
them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Greenberg:1996:AUL,
author = "Albert G. Greenberg and S. Shenker and Alexander L.
Stolyar",
title = "Asynchronous updates in large parallel systems",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "91--103",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Lubachevsky [5] introduced a new parallel simulation
technique intended for systems with limited
interactions between their many components or sites.
Each site has a local simulation time, and the states
of the sites are updated asynchronously. This
asynchronous updating appears to allow the simulation
to achieve a high degree of parallelism, with very low
overhead in processor synchronization. The key issue
for this asynchronous updating technique is: how fast
do the local times make progress in the large system
limit? We show that in a simple $K$-random interaction
model the local times progress at a rate $ 1 / (K +
1)$. More importantly, we find that the asymptotic
distribution of local times is described by a {\em
traveling wave\/} solution with exponentially decaying
tails. In terms of the parallel simulation, though the
interactions are local, a very high degree of global
synchronization results, and this synchronization is
succinctly described by the traveling wave solution.
Moreover, we report on experiments that suggest that
the traveling wave solution is {\em universal\/}; i.e.,
it holds in realistic scenarios (out of reach of our
analysis) where interactions among sites are not
random.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stiliadis:1996:DAF,
author = "Dimitrios Stiliadis and Anujan Varma",
title = "Design and analysis of frame-based fair queueing: a
new traffic scheduling algorithm for packet-switched
networks",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "104--115",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233030",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we introduce and analyze {\em
frame-based fair queueing}, a novel traffic scheduling
algorithm for packet-switched networks. The algorithm
provides end-to-end delay bounds identical to those of
PGPS (packet-level generalized processor sharing),
without the complexity of simulating the fluid-model
system in the background as required in PGPS. The
algorithm is therefore ideally suited for
implementation in packet switches supporting a large
number of sessions. We present a simple implementation
of the algorithm for a general packet switch. In
addition, we prove that the algorithm is fair in the
sense that sessions are not penalized for excess
bandwidth they received while other sessions were idle.
Frame-based fair queueing belongs to a general class of
scheduling algorithms, which we call {\em
Rate-Proportional Servers}. This class of algorithms
provides the same end-to-end delay and burstiness
bounds as PGPS, but allows more flexibility in the
design and implementation of the algorithm. We provide
a systematic analysis of this class of schedulers and
obtain bounds on their fairness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yates:1996:NSL,
author = "David J. Yates and Erich M. Nahum and James F. Kurose
and Don Towsley",
title = "Networking support for large scale multiprocessor
servers",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "116--125",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233032",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Over the next several years the performance demands on
globally available information servers are expected to
increase dramatically. These servers must be capable of
sending and receiving data over hundreds or even
thousands of simultaneous connections. In this paper,
we show that connection-level parallel protocols (where
different connections are processed in parallel)
running on a shared-memory multiprocessor can deliver
high network bandwidth across a large number of
connections. We experimentally evaluate
connection-level parallel implementations of both
TCP/IP and UDP/IP protocol stacks. We focus on three
questions in our performance evaluation: how throughput
scales with the number of processors, how throughput
changes as the number of connections increases, and how
fairly the aggregate bandwidth is distributed across
connections. We show how several factors impact
performance: the number of processors used, the number
of threads in the system, the number of connections
assigned to each thread, and the type of protocols in
the stack (i.e., TCP versus UDP). Our results show that
with careful implementation connection-level parallel
protocol stacks scale well with the number of
processors, and deliver high throughput which is, for
the most part, sustained as the number of connections
increases. Maximizing the number of threads in the
system yields the best overall throughput. However, the
best fairness behavior is achieved by matching the
number of threads to the number of processors and
scheduling connections assigned to threads in a
round-robin manner.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:1996:WSW,
author = "Martin F. Arlitt and Carey L. Williamson",
title = "{Web} server workload characterization: the search for
invariants",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "126--137",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The phenomenal growth in popularity of the World Wide
Web (WWW, or the Web) has made WWW traffic the largest
contributor to packet and byte traffic on the NSFNET
backbone. This growth has triggered recent research
aimed at reducing the volume of network traffic
produced by Web clients and servers, by using caching,
and reducing the latency for WWW users, by using
improved protocols for Web interaction. Fundamental to
the goal of improving WWW performance is an
understanding of WWW workloads. This paper presents a
workload characterization study for Internet Web
servers. Six different data sets are used in this
study: three from academic (i.e., university)
environments, two from scientific research
organizations, and one from a commercial Internet
provider. These data sets represent three different
orders of magnitude in server activity, and two
different orders of magnitude in time duration, ranging
from one week of activity to one year of activity.
Throughout the study, emphasis is placed on finding
workload {\em invariants\/}: observations that apply
across all the data sets studied. Ten invariants are
identified. These invariants are deemed important since
they (potentially) represent universal truths for all
Internet Web servers. The paper concludes with a
discussion of caching and performance issues, using the
invariants to suggest performance enhancements that
seem most promising for Internet Web servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Martonosi:1996:IPM,
author = "Margaret Martonosi and David Ofelt and Mark Heinrich",
title = "Integrating performance monitoring and communication
in parallel computers",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "138--147",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233035",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A large and increasing gap exists between processor
and memory speeds in scalable cache-coherent
multiprocessors. To cope with this situation,
programmers and compiler writers must increasingly be
aware of the memory hierarchy as they implement
software. Tools to support memory performance tuning
have, however, been hobbled by the fact that it is
difficult to observe the caching behavior of a running
program. Little hardware support exists specifically
for observing caching behavior; furthermore, what
support does exist is often difficult to use for making
fine-grained observations about program memory
behavior. Our work observes that in a multiprocessor,
the actions required for memory performance monitoring
are similar to those required for enforcing cache
coherence. In fact, we argue that on several machines,
the coherence/communication system itself can be used
as machine support for performance monitoring. We have
demonstrated this idea by implementing the FlashPoint
memory performance monitoring tool. FlashPoint is
implemented as a special performance-monitoring
coherence protocol for the Stanford FLASH
Multiprocessor. By embedding performance monitoring
into a cache-coherence scheme based on a programmable
controller, we can gather detailed, per-data-structure
memory statistics with less than a 10\% slowdown
compared to unmonitored program executions. We present
results on the accuracy of the data collected, and on
how FlashPoint performance scales with the number of
processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnaswamy:1996:MAE,
author = "Umesh Krishnaswamy and Isaac D. Scherson",
title = "Micro-architecture evaluation using performance
vectors",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "148--159",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233037",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Benchmarking is a widely used approach to measure
computer performance. Current use of benchmarks only
provides running times to describe the performance of a
tested system. Glancing through these execution times
provides little or no information about system
strengths and weaknesses. A novel benchmarking
methodology is proposed to identify key performance
parameters; the methodology is based on measuring
performance vectors. A performance vector is a vector
of ratings that represents delivered performance of
primitive operations of a system. Measuring the
performance vector of a system in a typical user
workload can be a tough problem. We show how the
performance vector falls out of an equation consisting
of dynamic instruction counts and execution times of
benchmarks. We present a non-linear approach for
computing the performance vector. The efficacy of the
methodology is ascertained by evaluating the
micro-architecture of the Sun SuperSPARC superscalar
processor using SPEC benchmarks. Results show
interesting tradeoffs in the SuperSPARC and speak
favorably of our methodology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crovella:1996:SSW,
author = "Mark E. Crovella and Azer Bestavros",
title = "Self-similarity in {World Wide Web} traffic: evidence
and possible causes",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "160--169",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently the notion of {\em self-similarity\/} has
been shown to apply to wide-area and local-area network
traffic. In this paper we examine the mechanisms that
give rise to the self-similarity of network traffic. We
present a hypothesized explanation for the possible
self-similarity of traffic by using a particular subset
of wide area traffic: traffic due to the World Wide Web
(WWW). Using an extensive set of traces of actual user
executions of NCSA Mosaic, reflecting over half a
million requests for WWW documents, we examine the
dependence structure of WWW traffic. While our
measurements are not conclusive, we show evidence that
WWW traffic exhibits behavior that is consistent with
self-similar traffic models. Then we show that the
self-similarity in such traffic can be explained based
on the underlying distributions of WWW document sizes,
the effects of caching and user preference in file
transfer, the effect of user `think time', and the
superimposition of many such transfers in a local area
network. To do this we rely on empirically measured
distributions both from our traces and from data
independently collected at over thirty WWW sites.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hillyer:1996:MPC,
author = "Bruce K. Hillyer and Avi Silberschatz",
title = "On the modeling and performance characteristics of a
serpentine tape drive",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "170--179",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233039",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "New applications require online access to many
terabytes of data, but a magnetic disk storage system
this large requires thousands of drives. Magnetic tape
is a good alternative, except that the application
demand for transparent data retrieval is not met by
current tape systems because of their high access
latency. This latency can be significantly improved by
good retrieval scheduling. A fundamental prerequisite
to efficient scheduling is the ability to estimate the
amount of time required for tape positioning operations
(the {\em locate time\/}). For serpentine tape, which
is the most common mass storage tape technology, this
estimation is subtle and complex. The main contribution
of this paper is a locate-time model for a DLT4000 tape
drive. The accuracy of the model is evaluated by
measurements, and the utility of the model is
demonstrated through a model-driven simulation of
retrieval scheduling, validated by measurements and
sensitivity testing. In brief, the locate-time model is
accurate to within a few percent, which enables the
production of efficient schedules.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasce:1996:AMH,
author = "Daniel A. Menasc{\'e} and Odysseas I. Pentakalos and
Yelena Yesha",
title = "An analytic model of hierarchical mass storage systems
with network-attached storage devices",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "180--189",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233041",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network attached storage devices improve I/O
performance by separating control and data paths and
eliminating host intervention during data transfer.
Devices are attached to a high speed network for data
transfer and to a slower network for control messages.
Hierarchical mass storage systems use disks to cache
the most recently used files and tapes (robotic and
manually mounted) to store the bulk of the files in the
file system. This paper shows how queuing network
models can be used to assess the performance of
hierarchical mass storage systems that use network
attached storage devices. The analytic model, validated
through simulation, was used to analyze many different
scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1996:AAW,
author = "Ken Chen and Laurent Decreusefond",
title = "An approximate analysis of waiting time in multi-class
{M/G/1/./EDF} queues",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "190--199",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233043",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Earliest-Deadline-First (EDF) queueing discipline
is being more and more widely used for handling
time-sensitive applications in computer systems and
networks. In this paper, we consider an arbitrary
number of traffic classes with class-specific
soft-deadline. A soft-deadline is a target waiting-time
limit that can be missed. EDF queueing has been proved
to minimize the maximum delay overflow related to this
limit. We propose a quantitative analysis, through the
metric of mean waiting time, on the behavior of EDF
queueing. This analysis also gives insight into the
correlation between traffic classes with different
time-constraints. Technically speaking, we have proven
that the mean waiting times for an arbitrary set of $N$
classes of traffic streams with soft deadlines are the
unique solution of a system of non-linear equations
under the constraint of Kleinrock's conservation
law. We then provide an $ O(N^2)$ algorithm to get the
solution. Simulation suggests that the theoretical
approximation we made is quite acceptable.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "communication networks; computer architecture;
multimedia systems; real-time systems; stochastic
modeling",
}
@Article{Aggarwal:1996:OPM,
author = "Charu Aggarwal and Joel Wolf and Philip S. Yu",
title = "On optimal piggyback merging policies for
video-on-demand systems",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "200--209",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233044",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A critical issue in the performance of a
video-on-demand system is the I/O bandwidth required in
order to satisfy client requests. A number of
techniques have been proposed in order to reduce these
bandwidth requirements. In this paper we concentrate on
one such technique, known as adaptive piggybacking. We
develop and analyze piggyback merging policies which
are optimal over large classes of reasonable methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gerber:1996:EDV,
author = "Richard Gerber and Ladan Gharai",
title = "Experiments with digital video playback",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "210--221",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233046",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe our experiments on digital
video applications, concentrating on the static and
dynamic tradeoffs involved in video playback. Our
results were extracted from a controlled series of 272
tests, which we ran in three stages. In the first stage
of 120 tests, we used a simple player-monitor tool to
evaluate the effects of various static parameters: {\em
compression type, frame size, digitized rate, spatial
quality\/} and {\em keyframe distribution.\/} The tests
were carried out on two Apple Macintosh platforms: at
the lower end a Quadra 950, and at the higher end, a
Power PC 7100/80. Our quantitative metrics included
average playback rate, as well as the rate's variance
over one-second intervals. The first set of experiments
unveiled several anomalous latencies. To track them
down we ran an additional 120 tests, from which we
concluded that the video and IO operations were
insufficiently tuned to each other. In the next step we
attempted to correct this problem, by implementing our
own video playback software and accompanying
device-level handlers. Our emphasis was on achieving a
controlled, deterministic coordination between the
various system components. An additional set of 32
experiments were carried out on our platforms, which
showed frame-rate increases of up to 325\%, with
associated reductions in rate variance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salehi:1996:SSV,
author = "James D. Salehi and Zhi-Li Zhang and James F. Kurose
and Don Towsley",
title = "Supporting stored video: reducing rate variability and
end-to-end resource requirements through optimal
smoothing",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "222--231",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233047",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "VBR compressed video is known to exhibit significant,
multiple-time-scale bit rate variability. In this
paper, we consider the transmission of stored video
from a server to a client across a high speed network,
and explore how the client buffer space can be used
most effectively toward reducing the variability of the
transmitted bit rate. We present two basic results.
First, we present an optimal smoothing algorithm for
achieving the {\em greatest possible reduction in rate
variability\/} when transmitting stored video to a
client with given buffer size. We provide a formal
proof of optimality, and demonstrate the performance of
the algorithm on a set of long MPEG-1 encoded video
traces. Second, we evaluate the impact of optimal
smoothing on the network resources needed for video
transport, under two network service models:
Deterministic Guaranteed service [1, 9] and
Renegotiated CBR (RCBR) service [8, 7]. Under both
models, we find the impact of optimal smoothing to be
dramatic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varki:1996:ABF,
author = "Elizabeth Varki and Lawrence W. Dowdy",
title = "Analysis of balanced fork-join queueing networks",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "232--241",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233048",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an analysis of closed, balanced,
fork-join queueing networks with exponential service
time distributions. The fork-join queue is mapped onto
two non-parallel networks, namely, a serial-join model
and a state-dependent model. Using these models, it is
proven that the proportion of the number of jobs in the
different subsystems of the fork-join queueing network
remains constant, irrespective of the multiprogramming
level. This property of balanced fork-join networks is
used to compute quick, inexpensive bounds for arbitrary
fork-join networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carrasco:1996:EEA,
author = "Juan A. Carrasco and Javier Escrib{\'a} and Angel
Calder{\'o}n",
title = "Efficient exploration of availability models guided by
failure distances",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "242--251",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233049",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, a method to bound the steady-state
availability using the failure distance concept has
been proposed. In this paper we refine that method by
introducing state space exploration techniques. In the
methods proposed here, the state space is incrementally
generated based on the contributions to the
steady-state availability band of the states in the
frontier of the currently generated state space.
Several state space exploration algorithms are
evaluated in terms of bounds quality and memory and CPU
time requirements. The most efficient seems to be a
waved algorithm which expands transition groups. We
compare our new methods with the method based on the
failure distance concept without state exploration and
a method proposed by Souza e Silva and Ochoa which uses
state space exploration but does not use the failure
distance concept. Using typical examples we show that
the methods proposed here can be significantly more
efficient than any of the previous methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Garg:1996:MCT,
author = "Sachin Garg and Yennun Huang and Chandra Kintala and
Kishor S. Trivedi",
title = "Minimizing completion time of a program by
checkpointing and rejuvenation",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "252--261",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233050",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Checkpointing with rollback-recovery is a well known
technique to reduce the completion time of a program in
the presence of failures. While checkpointing is
corrective in nature, rejuvenation refers to preventive
maintenance of software aimed to reduce unexpected
failures mostly resulting from the `aging' phenomenon.
In this paper, we show how both these techniques may be
used together to further reduce the expected completion
time of a program. The idea of using checkpoints to
reduce the amount of rollback upon a failure is taken a
step further by combining it with rejuvenation. We
derive the equations for expected completion time of a
program with finite failure-free running time for the
following three cases: (a) neither checkpointing
nor rejuvenation is employed, (b) only checkpointing is
employed, and finally (c) both checkpointing and
rejuvenation are employed. We also present numerical
results for the Weibull failure time distribution for the
above three cases and discuss optimal checkpointing and
rejuvenation that minimizes the expected completion
time. Using the numerical results, some interesting
conclusions are drawn about benefits of these
techniques in relation to the nature of failure
distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kimbrel:1996:IPP,
author = "Tracy Kimbrel and Pei Cao and Edward W. Felten and
Anna R. Karlin and Kai Li",
title = "Integrated parallel prefetching and caching",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "262--263",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233052",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Leutenegger:1996:BME,
author = "Scott T. Leutenegger and Mario A. Lopez",
title = "A buffer model for evaluating {R}-tree performance",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "264--265",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233054",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerstein:1996:ASM,
author = "Joseph L. Hellerstein",
title = "An approach to selecting metrics for detecting
performance problems in information systems",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "266--267",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233055",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Courtright:1996:RRP,
author = "William V. {Courtright II} and Garth Gibson and Mark
Holland and Jim Zelenka",
title = "{RAIDframe}: rapid prototyping for disk arrays",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "268--269",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramany:1996:QAR,
author = "Swaminathan Ramany and Derek Eager",
title = "Quantifying achievable routing performance in
multiprocessor interconnection networks",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "270--271",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233059",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hotovy:1996:AEW,
author = "Steven Hotovy and David Schneider and Timothy
O'Donnell",
title = "Analysis of the early workload on the {Cornell Theory
Center IBM SP2}",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "272--273",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233060",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel computers have matured to the point where
they are capable of running a significant production
workload. Characterizing this workload, however, is far
more complicated than for the single-processor case.
Besides the varying number of processors that may be
invoked, the nodes themselves may provide differing
computational resources (memory size, for example). In
addition, the batch schedulers may introduce further
categories of service which must be considered in the
analysis. The Cornell Theory Center (CTC) put a
512-node IBM SP2 system into production in early 1995.
Extended traces of batch jobs began to be collected in
mid-1995 when the usage base became sufficiently large.
This paper offers an analysis of this early batch
workload.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Braun:1997:APL,
author = "Hans-Werner Braun",
title = "Architecture and performance of large internets, based
on terrestrial and satellite infrastructure",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "1--1",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258628",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balakrishnan:1997:ASW,
author = "Hari Balakrishnan and Mark Stemm and Srinivasan Seshan
and Randy H. Katz",
title = "Analyzing stability in wide-area network performance",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "2--12",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258631",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Internet is a very large scale, complex, dynamical
system that is hard to model and analyze. In this
paper, we develop and analyze statistical models for
the observed end-to-end network performance based on
extensive packet-level traces (consisting of
approximately 1.5 billion packets) collected from the
primary Web site for the Atlanta Summer Olympic Games
in 1996. We find that observed mean throughputs for
these transfers measured over 60 million complete
connections vary widely as a function of end-host
location and time of day, confirming that the Internet
is characterized by a large degree of heterogeneity.
Despite this heterogeneity, we find (using best-fit
linear regression techniques) that we can express the
throughput for Web transfers to most hosts as a random
variable with a log-normal distribution. Then, using
observed throughput as the control parameter, we
attempt to quantify the {\em spatial\/} (statistical
similarity across neighboring hosts) and {\em
temporal\/} (persistence over time) stability of
network performance. We find that Internet hosts that
are close to each other often have almost identically
distributed probability distributions of throughput. We
also find that throughputs to individual hosts often do
not change appreciably for several minutes. Overall,
these results indicate that there is promise in
protocol mechanisms that cache and share network
characteristics both within a single host and amongst
nearby hosts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maltzahn:1997:PIE,
author = "Carlos Maltzahn and Kathy J. Richardson and Dirk
Grunwald",
title = "Performance issues of enterprise level {Web} proxies",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "13--23",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258668",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Enterprise level Web proxies relay World-Wide Web
traffic between private networks and the Internet. They
improve security, save network bandwidth, and reduce
network latency. While the performance of web proxies
has been analyzed based on synthetic workloads, little
is known about their performance on real workloads. In
this paper we present a study of two web proxies (CERN
and Squid) executing real workloads on Digital's Palo
Alto Gateway. We demonstrate that the simple CERN proxy
architecture outperforms all but the latest version of
Squid and continues to outperform cacheless
configurations. For the measured load levels the Squid
proxy used at least as many CPU, memory, and disk
resources as CERN, in some configurations significantly
more resources. At higher load levels the resource
utilization requirements will cross and Squid will be
the one using fewer resources. Lastly we found that
cache hit rates of around 30\% had very little effect
on the request service time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Heyman:1997:NMA,
author = "D. P. Heyman and T. V. Lakshman and Arnold L.
Neidhardt",
title = "A new method for analysing feedback-based protocols
with applications to engineering {Web} traffic over the
{Internet}",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "24--38",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258670",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most of the studies of feedback-based flow and
congestion control consider only persistent sources
which always have data to send. However, with the rapid
growth of Internet applications built on TCP/IP such as
the World Wide Web and the standardization of traffic
management schemes such as Available Bit Rate (ABR) in
Asynchronous Transfer Mode (ATM) networks, it is
essential to evaluate the performance of feedback-based
protocols using traffic models which are specific to
dominant applications. This paper presents a method for
analysing feedback-based protocols with a Web-user-like
input traffic where the source alternates between
`transfer' periods followed by `think' periods. Our key
results, which are presented for the TCP protocol,
are: (1) The goodputs and the fraction of time that the
system has some given number of transferring sources
are {\em insensitive\/} to the distributions of
transfer (file or page) sizes and think times except
through the ratio of their means. Thus, apart from
network round-trip times, only the ratio of average
transfer sizes and think times of users need be known
to size the network for achieving a specific quality of
service. (2) The Engset model can be adapted to
accurately compute goodputs for TCP and TCP over ATM,
with different buffer management schemes. Though only
these adaptations are given in the paper, the method
based on the Engset model can be applied to analyze
other feedback systems, such as ATM ABR, by finding a
protocol specific adaptation. Hence, the method we
develop is useful not only for analysing TCP using a
source model significantly different from the commonly
used persistent sources, but also can be useful for
analysing other feedback schemes. (3) Comparisons of
simulated TCP traffic to measured Ethernet traffic
shows qualitatively similar autocorrelation when think
times follow a Pareto distribution with infinite
variance. Also, the simulated and measured traffic have
long range dependence. In this sense our traffic model,
which purports to be Web-user-like, also agrees with
measured traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ma:1997:QME,
author = "Qingming Ma and K. K. Ramakrishnan",
title = "Queue management for explicit rate based congestion
control",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "39--51",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258672",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Rate based congestion control has been considered
desirable, both to deal with the high bandwidth-delay
products of today's high speed networks, and to match
the needs of emerging multimedia applications. Explicit
rate control achieves low loss because sources transmit
smoothly at a rate adjusted through feedback to be
within the capacity of the resources in the network.
However, large feedback delays, presence of higher
priority traffic, and varying transient situations make
it difficult to ensure {\em feasibility\/} (i.e., keep
the aggregate arrival rate below the bottleneck
resource's capacity) while also maintaining high
resource utilization. These conditions along with the
`fast start' desired by data applications often result
in substantial queue buildups. We describe a scheme
that manages the queue buildup at a switch even under
the most aggressive patterns of sources, in the context
of the Explicit Rate option for the Available Bit Rate
(ABR) congestion control scheme. A switch observes the
buildup of its queue, and uses it to reduce the portion
of the link capacity allocated to sources bottlenecked
at that link. We use the concept of a `virtual' queue,
which tracks the amount of queue that has been
`reduced', but has not yet taken effect at the switch.
We take advantage of the natural timing of `resource
management' (RM) cells transmitted by sources. The
scheme is elegant in that it is simple, and we show
that it reduces the queue buildup, in some cases, by
more than two orders of magnitude and the queue size
remains around a desired target. It maintains max-min
fairness even when the queue is being drained. The
scheme is scalable, and is as responsive as can be
expected: within the constraints of the feedback delay.
Finally, no changes are needed to the ATM Forum defined
source/destination policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ott:1997:TAA,
author = "Teunis J. Ott and Neil Aggarwal",
title = "{TCP} over {ATM}: {ABR} or {UBR}?",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "52--63",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258674",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports on a simulation study of the
relative performances of the ATM ABR and UBR service
categories in transporting TCP/IP flows through an ATM
Network. The objective is two-fold: (i) to understand
the interaction between the window-based end-to-end
flow control TCP and the rate based flow control ABR
which is restricted to the ATM part of the network, and
(ii) to decide whether the greater complexity of ABR
(than UBR) pays off in better performance of ABR (than
UBR). The most important conclusion is that there does
not seem to be strong evidence that for TCP/IP
workloads the greater complexity of ABR pays off in
better performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kasera:1997:SRM,
author = "Sneha K. Kasera and Jim Kurose and Don Towsley",
title = "Scalable reliable multicast using multiple multicast
groups",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "64--74",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258676",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We examine an approach for providing reliable,
scalable multicast communication, using multiple
multicast groups for reducing receiver processing costs
in a multicast session. In this approach a single
multicast group is used for the original transmission
of packets. Retransmissions of packets are done to
separate multicast groups, which receivers dynamically
join or leave. We first show that by using an infinite
number of multicast groups, processing overhead at the
receivers is substantially reduced. Next, we show
that, for a specific negative acknowledgment
(NAK)-based protocol, most of this reduction can be
obtained by using only a small number of multicast
groups for a wide range of system parameters. Finally,
we present a local filtering scheme for minimizing
join/leave signaling when multiple multicast groups are
used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajamony:1997:PDS,
author = "Ramakrishnan Rajamony and Alan L. Cox",
title = "Performance debugging shared memory parallel programs
using run-time dependence analysis",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "75--87",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258678",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe a new approach to performance debugging
that focuses on automatically identifying computation
transformations to reduce synchronization and
communication. By grouping writes together into {\em
equivalence classes}, we are able to tractably collect
information from long-running programs. Our performance
debugger analyzes this information and suggests
computation transformations in terms of the source
code. We present the transformations suggested by the
debugger on a suite of four applications. For
Barnes--Hut and Shallow, implementing the debugger
suggestions improved the performance by a factor of
1.32 and 34 times respectively on an 8-processor IBM
SP2. For Ocean, our debugger identified excess
synchronization that did not have a significant impact
on performance. ILINK, a genetic linkage analysis
program widely used by geneticists, is already well
optimized. We use it only to demonstrate the
feasibility of our approach to long-running
applications. We also give details on how our approach
can be implemented. We use novel techniques to convert
control dependences to data dependences, and to compute
the source operands of stores. We report on the impact
of our instrumentation on the same application suite we
use for performance debugging. The instrumentation
slows down the execution by a factor of between 4 and
169 times. The log files produced during execution were
all less than 2.5 Mbytes in size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Herbordt:1997:PSC,
author = "Martin C. Herbordt and Owais Kidwai and Charles C.
Weems",
title = "Preprototyping {SIMD} coprocessors using virtual
machine emulation and trace compilation",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "88--99",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258679",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of massively parallel SIMD array architectures
is proliferating in the area of domain specific
coprocessors. Even so, they have undergone few
systematic empirical studies. The underlying problems
include the size of the architecture space, the lack of
portability of the test programs, and the inherent
complexity of simulating up to hundreds of thousands of
processing elements. We address the computational cost
problem with a novel approach to trace-based
simulation. Code is run on an abstract virtual machine
to generate a coarse-grained trace, which is then
refined through a series of transformations (a process
we call {\em trace compilation\/}) wherein greater
resolution is obtained with respect to the details of
the target machine. We have found this technique to be
one to two orders of magnitude faster than
instruction-level simulation while still retaining much
of the accuracy of the model. Furthermore, abstract
machine traces must be regenerated for only a small
fraction of the possible parameter combinations. Using
virtual machine emulation and trace compilation also
addresses program portability by allowing the user to
code in a single data parallel language with a single
compiler, regardless of the target architecture. This
technique has already been used to generate significant
results with respect to SIMD array architectures, a
sample of which are presented here.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tomkins:1997:IMP,
author = "Andrew Tomkins and R. Hugo Patterson and Garth
Gibson",
title = "Informed multi-process prefetching and caching",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "100--114",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258680",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Informed prefetching and caching based on application
disclosure of future I/O accesses (hints) can
dramatically reduce the execution time of I/O-intensive
applications. A recent study showed that, in the
context of a single hinting application, prefetching
and caching algorithms should adapt to the dynamic load
on the disks to obtain the best performance. In this
paper, we show how to incorporate adaptivity to disk
load into the TIP2 system, which uses {\em cost-benefit
analysis\/} to allocate global resources among multiple
processes. We compare the resulting system, which we
call TIPTOE (TIP with Temporal Overload Estimators), to
Cao et al.'s LRU-SP allocation scheme, also modified to
include adaptive prefetching. Using disk-accurate
trace-driven simulation we show that, averaged over
eleven experiments involving pairs of hinting
applications, and with data striped over one to ten
disks, TIPTOE delivers 7\% lower execution time than
LRU-SP. Where the computation and I/O demands of each
experiment are closely matched, in a two-disk array,
TIPTOE delivers 18\% lower execution time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glass:1997:APR,
author = "Gideon Glass and Pei Cao",
title = "Adaptive page replacement based on memory reference
behavior",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "115--126",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258681",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As disk performance continues to lag behind that of
memory systems and processors, virtual memory
management becomes increasingly important for overall
system performance. In this paper we study the page
reference behavior of a collection of memory-intensive
applications, and propose a new virtual memory page
replacement algorithm, SEQ. SEQ detects long sequences
of page faults and applies most-recently-used
replacement to those sequences. Simulations show that
for a large class of applications, SEQ performs close
to the optimal replacement algorithm, and significantly
better than Least-Recently-Used (LRU). In addition, SEQ
performs similarly to LRU for applications that do not
exhibit sequential faulting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Voelker:1997:MSL,
author = "Geoffrey M. Voelker and Herv{\'e} A. Jamrozik and Mary
K. Vernon and Henry M. Levy and Edward D. Lazowska",
title = "Managing server load in global memory systems",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "127--138",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258682",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "New high-speed switched networks have reduced the
latency of network page transfers significantly below
that of local disk. This trend has led to the
development of systems that use network-wide memory, or
{\em global\/} memory, as a cache for virtual memory
pages or file blocks. A crucial issue in the
implementation of these global memory systems is the
selection of the target nodes to receive replaced
pages. Current systems use various forms of an
approximate global LRU algorithm for making these
selections. However, using age information alone can
lead to suboptimal performance in two ways. First,
workload characteristics can lead to uneven
distributions of old pages across servers, causing
increased contention delays. Second, the global memory
traffic imposed on a node can degrade the performance
of local jobs on that node. This paper studies the
potential benefit and the potential harm of using load
information, in addition to age information, in global
memory replacement policies. Using an analytic queueing
network model, we show the extent to which server load
can degrade remote memory latency and how load
balancing solves this problem. Load balancing requests
can cause the system to deviate from the global LRU
replacement policy, however. Using trace-driven
simulation, we study the impact on application
performance of deviating from the LRU replacement
policy. We find that deviating from strict LRU, even
significantly for some applications, does not affect
application performance. Based upon these results, we
conclude that global memory systems can gain
substantial benefit from load balancing requests with
little harm from suboptimal replacement decisions.
Finally, we illustrate the use of the intuition gained
from the model and simulation experiments by proposing
a new family of algorithms that incorporate load
considerations as well as age information in global
memory replacement decisions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Woodward:1997:SLB,
author = "Michael E. Woodward",
title = "Size-limited batch movement in product-form closed
discrete-time queueing networks",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "139--146",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258683",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Existing models for product-form closed discrete-time
queueing networks with batch movement of customers
implicitly assume that batch sizes are unrestricted. In
many practical modelling situations however, it is
necessary to impose restrictions on the batch sizes,
and this paper examines the repercussions of such
restrictions on the product-form properties of the
networks. It is shown that when batch sizes are
restricted independently then, in general, the
resulting networks cannot have a product-form
equilibrium distribution. Sufficient conditions to
retain a product-form are derived in the cases when
batch sizes are either correlated or depend on the
state of the network. Examples of applying the results
to obtain product-form networks with both correlated
and state dependent batch movement are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:1997:BPM,
author = "Leana Golubchik and John C. S. Lui",
title = "Bounding of performance measures for a threshold-based
queueing system with hysteresis",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "147--157",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258684",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider a $K$-server
threshold-based queueing system with hysteresis in
which the number of servers, employed for servicing
customers, is governed by a {\em forward threshold\/}
vector $ F = (F_1, F_2, \ldots {}, F_{K - 1})$ (where $
F_1 < F_2 < \cdots < F_{K - 1}$) and a {\em reverse threshold\/}
vector $ R = (R_1, R_2, \ldots {}, R_{K - 1})$ (where $
R_1 < R_2 < \cdots < R_{K - 1}$). There are many applications
where a threshold-based queueing system can be of great
use. The main motivation for using a threshold-based
approach in such applications is that they incur
significant server setup, usage, and removal costs.
And, as in most practical situations, an important
concern is not only the system performance but rather
its cost/performance ratio. The motivation for use of
hysteresis is to control the cost during momentary
fluctuations in workload. An important and
distinguishing characteristic of our work is that in
our model we consider the {\em time to add a server to
be non-negligible.\/} This is a more accurate model,
for many applications, than previously considered in
other works. Our main goal in this work is to develop
an efficient method for computing the steady state
probabilities of a multi-server threshold queueing
system with hysteresis, which will, in turn, allow
computation of various performance measures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lehoczky:1997:URT,
author = "John P. Lehoczky",
title = "Using real-time queueing theory to control lateness in
real-time systems",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "158--168",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258685",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents {\em real-time queueing theory}, a
new theory which embeds the ability of real-time
scheduling theory to determine whether task timing
requirements are met into the context of queueing
models. Specifically, this paper extends the analysis
developed in Lehoczky [9] to the GI/M/1 case. The paper
also applies these models to study queue control
strategies which can control customer lateness.
Arriving customers have deadlines drawn from a general
deadline distribution. The state variable for the
queueing system must include the number in the queue
(with supplementary variables as needed to create a
Markov model) and the {\em lead-time\/} (deadline minus
current time) of each customer; thus the state space is
infinite dimensional. One can represent the state of
the system as a measure on the real line and can
represent that measure by its Fourier transform. Thus,
a real-time queueing system can be characterized as a
Markov process evolving on the space of Fourier
transforms, and this paper presents a characterization
of the instantaneous simultaneous lead-time profile of
all the customers in the queue. This profile is
complicated; however, in the heavy traffic case, a
simple description of the lead-time profile emerges,
namely that the lead-time profile behaves like a
Brownian motion evolving on a particular manifold of
Fourier transforms; the manifold depending upon the
queue discipline and the customer deadline
distributions. This approximation is very accurate when
compared with simulations. Real-time queueing theory
focuses on how well a particular queue discipline meets
customer timing requirements, and focuses on the
dynamic rather than the equilibrium behavior of the
system. As such, it offers the potential to study
control strategies to ensure that customers meet their
deadlines. This paper illustrates the analysis and
performance evaluation for certain queue control
strategies. Generalizations to more complicated models
and to queueing networks are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nahum:1997:CBN,
author = "Erich Nahum and David Yates and Jim Kurose and Don
Towsley",
title = "Cache behavior of network protocols",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "169--180",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258686",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a performance study of memory
reference behavior in network protocol processing,
using an Internet-based protocol stack implemented in
the $x$-kernel running in user space on a MIPS
R4400-based Silicon Graphics machine. We use the
protocols to drive a validated execution-driven
architectural simulator of our machine. We characterize
the behavior of network protocol processing, deriving
statistics such as cache miss rates and percentage of
time spent waiting for memory. We also determine how
sensitive protocol processing is to the architectural
environment, varying factors such as cache size and
associativity, and predict performance on future
machines. We show that network protocol cache behavior
varies widely, with miss rates ranging from 0 to 28
percent, depending on the scenario. We find instruction
cache behavior has the greatest effect on protocol
latency under most cases, and that cold cache behavior
is very different from warm cache behavior. We
demonstrate the upper bounds on performance that can be
expected by improving memory behavior, and the impact
of features such as associativity and larger cache
sizes. In particular, we find that TCP is more
sensitive to cache behavior than UDP, gaining larger
benefits from improved associativity and bigger caches.
We predict that network protocols will scale well with
CPU speeds in the future.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Knightly:1997:SMR,
author = "Edward W. Knightly",
title = "Second moment resource allocation in multi-service
networks",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "181--191",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258687",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A crucial problem for the efficient design and
management of integrated services networks is how to
best allocate network resources for heterogeneous and
bursty traffic streams in multiplexers that support
prioritized service disciplines. In this paper, we
introduce a new approach for determining per-connection
performance parameters such as delay-bound violation
probability and loss probability in multi-service
networks. The approach utilizes a traffic
characterization consisting of the variances of a
stream's rate distribution over multiple interval
lengths, which captures its burstiness properties and
autocorrelation structure. From this traffic
characterization, we provide a simple and efficient
resource allocation algorithm by deriving stochastic
delay-bounds for static priority schedulers and
employing a Gaussian approximation over intervals. To
evaluate the scheme, we perform trace-driven simulation
experiments with long traces of MPEG-compressed video
and show that our approach is accurate enough to
capture most of the inherent statistical multiplexing
gain, achieving average network utilizations of up to
90\% for these traces and substantially outperforming
previous `effective bandwidth' techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krunz:1997:CVM,
author = "Marwan Krunz and Satish K. Tripathi",
title = "On the characterization of {VBR MPEG} streams",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "192--202",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258688",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a comprehensive model for variable-bit-rate
MPEG video streams. This model captures the bit-rate
variations at multiple time scales. Long-term
variations are captured by incorporating scene changes,
which are most noticeable in the fluctuations of $I$
frames. The size of an $I$ frame is modeled by the sum
of two random components: a scene-related component and
an AR(2) component that accounts for the fluctuations
within a scene. Two random processes of {\em i.i.d.\/}
rvs are used to model the sizes of {\em P\/} and $B$
frames, respectively. The complete model is then
obtained by intermixing the three sub-models according
to a given GOP pattern. It is shown that the composite
model exhibits long-range dependence (LRD) in the sense
that its autocorrelation function is non-summable. The
LRD behavior is caused by the repetitive GOP pattern
which induces periodic cross-correlations between
different types of frames. Using standard statistical
methods, we successfully fit our model to several
empirical video traces. We then study the queueing
performance for video traffic at a statistical
multiplexer. The results show that the model is
sufficiently accurate in predicting the queueing
performance for real video streams.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smith:1997:FSA,
author = "Keith A. Smith and Margo I. Seltzer",
title = "File system aging --- increasing the relevance of file
system benchmarks",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "203--213",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258689",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Benchmarks are important because they provide a means
for users and researchers to characterize how their
workloads will perform on different systems and
different system architectures. The field of file
system design is no different from other areas of
research in this regard, and a variety of file system
benchmarks are in use, representing a wide range of the
different user workloads that may be run on a file
system. A realistic benchmark, however, is only one of
the tools that is required in order to understand how a
file system design will perform in the real world. The
benchmark must also be executed on a realistic file
system. While the simplest approach may be to measure
the performance of an empty file system, this
represents a state that is seldom encountered by real
users. In order to study file systems in more
representative conditions, we present a methodology for
aging a test file system by replaying a workload
similar to that experienced by a real file system over
a period of many months, or even years. Our aging tools
allow the same aging workload to be applied to multiple
versions of the same file system, allowing scientific
evaluation of the relative merits of competing file
system designs. In addition to describing our aging
tools, we demonstrate their use by applying them to
evaluate two enhancements to the file layout policies
of the UNIX fast file system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brown:1997:OSB,
author = "Aaron B. Brown and Margo I. Seltzer",
title = "Operating system benchmarking in the wake of {\tt
lmbench}: a case study of the performance of {NetBSD}
on the {Intel x86} architecture",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "214--224",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258690",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The {\tt lmbench} suite of operating system
microbenchmarks provides a set of portable programs for
use in cross-platform comparisons. We have augmented
the {\tt lmbench} suite to increase its flexibility and
precision, and to improve its methodological and
statistical operation. This enables the detailed study
of interactions between the operating system and the
hardware architecture. We describe modifications to
{\tt lmbench}, and then use our new benchmark suite,
{\tt hbench:OS}, to examine how the performance of
operating system primitives under NetBSD has scaled
with the processor evolution of the Intel x86
architecture. Our analysis shows that off-chip memory
system design continues to influence operating system
performance in a significant way and that key design
decisions (such as suboptimal choices of DRAM and cache
technology, and memory-bus and cache coherency
protocols) can essentially nullify the performance
benefits of the aggressive execution core and
sophisticated on-chip memory system of a modern
processor such as the Intel Pentium Pro.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "See long rebuttal in {\tt hbench-REBUTTAL} in
\url{http://bitmover.com/lmbench/} source code.",
}
@Article{Acharya:1997:UEI,
author = "Anurag Acharya and Guy Edjlali and Joel Saltz",
title = "The utility of exploiting idle workstations for
parallel computation",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "225--234",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258691",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine the utility of exploiting
idle workstations for parallel computation. We attempt
to answer the following questions. First, given a
workstation pool, for what fraction of time can we
expect to find a cluster of $k$ workstations available?
This provides an estimate of the opportunity for
parallel computation. Second, how stable is a cluster
of free machines and how does the stability vary with
the size of the cluster? This indicates how frequently
a parallel computation might have to stop for adapting
to changes in processor availability. Third, what is
the distribution of workstation idle-times? This
information is useful for selecting workstations to
place computation on. Fourth, how much benefit can a
user expect? To state this in concrete terms, if I have
a pool of size $S$, how big a parallel machine should I
expect to get for free by harvesting idle machines.
Finally, how much benefit can be achieved on a real
machine and how hard does a parallel programmer have to
work to make this happen? To answer the
workstation-availability questions, we have analyzed
14-day traces from three workstation pools. To
determine the equivalent parallel machine, we have
simulated the execution of a group of well-known
parallel programs on these workstation pools. To gain
an understanding of the practical problems, we have
developed the system support required for adaptive
parallel programs and have used it to build an adaptive
parallel computational fluid dynamics application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qin:1997:PEC,
author = "Xiaohan Qin and Jean-Loup Baer",
title = "A performance evaluation of cluster architectures",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "237--247",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates the performance of
shared-memory cluster-based architectures where each
cluster is a shared-bus multiprocessor augmented with a
protocol processor maintaining cache coherence across
clusters. For a given number of processors, sixteen in
this study, we evaluate the performance of various
cluster configurations. We also consider the impact of
adding a remote shared cache in each cluster. We use
Mean Value Analysis to estimate the cache miss
latencies of various types and the overall execution
time. The service demands of shared resources are
characterized in detail by examining the sub-requests
issued in resolving cache misses. In addition to the
architectural system parameters and the service demands
on resources, the analytical model needs parameters
pertinent to applications. The latter, in particular
cache miss profiles, are obtained by trace-driven
simulation of three benchmarks. Our results show that
without remote caches the performance of cluster-based
architectures is mixed. In some configurations, the
negative effects of the longer latency of inter-cluster
misses and of the contention on the protocol processor
are too large to counter-balance the lower contention
on the data buses. For two out of the three
applications best results are obtained when the system
has clusters of size 2 or 4. The cluster-based
architectures with remote caches consistently
outperform the single bus system for all 3
applications. We also exercise the model with
parameters reflecting the current trend in technology
making the processor relatively faster than the bus and
memory. Under these new conditions, our results show a
clear performance advantage for the cluster-based
architectures, with or without remote caches, over
single bus systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiueh:1997:DED,
author = "Tzi-cker Chiueh and Srinidhi Varadarajan",
title = "Design and evaluation of a {DRAM}-based shared memory
{ATM} switch",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "248--259",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "{\em Beluga\/} is a single-chip switch architecture
specifically targeted at local area ATM networks, and
it features three architectural innovations. First, an
interconnection hierarchy composed of multiple
switching fabrics is built into the chip to provide
both low-latency cell transfer when the traffic is
light and low cell drop rate under heavy load.
Secondly, to improve silicon efficiency, Beluga is
based on shared memory architecture, and the buffers
are implemented using DRAM rather than SRAM technology.
Heavy interleaving and selective invalidation are used
to address long latency and periodic refreshing
problems, respectively. Thirdly, Beluga supports
multicast with minimal physical bit replication. It
also separates support for unicast and multicast cells
to optimize for the common case, where multicast cells
occur infrequently. This paper describes the design
details of {\em Beluga\/} and the results of a
comprehensive simulation study to quantify the
performance impact of each of its architectural
features. The most important result from this research
is that DRAM-based buffer implementation significantly
reduces the cell-drop rate during heavy load while
exhibiting almost identical cell latency to SRAM-based
implementation during light load. Therefore, we believe
DRAM makes an attractive alternative for switch buffer
implementation, especially for single-chip architecture
such as {\em Beluga.\/}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Song:1997:ERC,
author = "Junehwa Song and Asit Dan and Dinkar Sitaram",
title = "Efficient retrieval of composite multimedia objects in
the {JINSIL} distributed system",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "260--271",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258695",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a distributed environment, presentation of
structured, composite multimedia information poses new
challenges in dealing with variable bandwidth (BW)
requirement and synchronization of media data objects.
The detailed knowledge of BW requirement obtained by
analyzing the document structure can be used to create
a prefetch schedule that results in efficient
utilization of system resources. A distributed
environment consists of various system components that
are either dedicated to a client or shared across
multiple clients. Shared system components could
benefit from {\em Fine Granularity Advanced Reservation
(FGAR)\/} of resources based on true BW requirement.
Prefetching by utilizing advance knowledge of BW
requirement can further improve resource utilization.
In this paper, we describe the JINSIL retrieval system
that takes into account the available bandwidth and
buffer resources and the nature of sharing in each
component on the delivery path. It reshapes BW
requirement, creates prefetch schedule for efficient
resource utilization in each component, and reserves
necessary BW and buffer. We also consider good choices
for placement of prefetch buffers across various system
components.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gibson:1997:FSS,
author = "Garth A. Gibson and David F. Nagle and Khalil Amiri
and Fay W. Chang and Eugene M. Feinberg and Howard
Gobioff and Chen Lee and Berend Ozceri and Erik Riedel
and David Rochberg and Jim Zelenka",
title = "File server scaling with network-attached secure
disks",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "272--284",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "By providing direct data transfer between storage and
client, network-attached storage devices have the
potential to improve scalability for existing
distributed file systems (by removing the server as a
bottleneck) and bandwidth for new parallel and
distributed file systems (through network striping and
more efficient data paths). Together, these advantages
influence a large enough fraction of the storage market
to make commodity network-attached storage feasible.
Realizing the technology's full potential requires
careful consideration across a wide range of file
system, networking and security issues. This paper
contrasts two network-attached storage
architectures---(1) Networked SCSI disks (NetSCSI) are
network-attached storage devices with minimal changes
from the familiar SCSI interface, while (2)
Network-Attached Secure Disks (NASD) are drives that
support independent client access to drive object
services. To estimate the potential performance
benefits of these architectures, we develop an analytic
model and perform trace-driven replay experiments based
on AFS and NFS traces. Our results suggest that NetSCSI
can reduce file server load during a burst of NFS or
AFS activity by about 30\%. With the NASD architecture,
server load (during burst activity) can be reduced by a
factor of up to five for AFS and up to ten for NFS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsiolis:1997:GGC,
author = "Athanassios K. Tsiolis and Mary K. Vernon",
title = "Group-guaranteed channel capacity in multimedia
storage servers",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "285--297",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the open questions in the design of multimedia
storage servers is in what order to serve incoming
requests. Given the capability provided by the disk
layout and scheduling algorithms to serve multiple
streams simultaneously, improved request scheduling
algorithms can reduce customer waiting times. This
results in better service and/or lower customer loss.
In this paper we define a new class of request
scheduling algorithms, called Group-Guaranteed Server
Capacity (GGSC), that preassign server channel capacity
to groups of objects. We also define a particular
formal method for computing the assigned capacities to
achieve a given performance objective. We observe that
the FCFS policy can provide the precise time of service
to incoming customer requests. Under this assumption,
we compare the performance of one of the new GGSC
algorithms, GGSCW-FCFS, against FCFS and against two other recently
proposed scheduling algorithms: Maximum Factored Queue
length (MFQ), and the FCFS-n algorithm that preassigns
capacity only to each of the $n$ most popular objects.
The algorithms are compared for both {\em competitive
market\/} and {\em captured audience\/} environments.
Key findings of the algorithm comparisons are that: (1)
FCFS-n has no advantage over FCFS if FCFS gives time of
service guarantees to arriving customers, (2) FCFS and
GGSCW-FCFS are superior to MFQ for both competitive and
captive audience environments, (3) for competitive
servers that are configured for customer loss less than
10\%, FCFS is superior to all other algorithms examined
in this paper, and (4) for captive audience
environments that have objects with variable playback
length, GGSCW-FCFS is the most promising of the
policies considered in this paper. The conclusions for
FCFS-n and MFQ differ from previous work because we
focus on competitive environments with customer loss
under 10\%, we assume FCFS can provide time of service
guarantees to all arriving customers, and we consider
the distribution of customer waiting time as well as
the average waiting time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Muntz:1997:SIM,
author = "Richard Muntz",
title = "Special Issue on Multimedia Storage Systems",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "2--2",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.581190",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ozden:1997:AIM,
author = "Banu {\"O}zden and Rajeev Rastogi and Avi
Silberschatz",
title = "Architecture issues in multimedia storage systems",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "3--12",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262394",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Next generation storage systems will need to provide
support for both textual data and other types of
multimedia data (e.g., images, video, audio). These two
types of data differ in their characteristics, and
hence require different techniques for their
organization and management. In this paper, we provide
an overview of (1) how storage systems can be
architectured to support multimedia data, and (2) what
are the main challenges in devising new algorithms to
manage multimedia data. In order to provide rate
guarantees for continuous media data, an admission
control scheme must be employed that determines, for
each client, whether there are sufficient resources
available to service that client. To maximize the
number of clients that can be admitted concurrently,
the various system resources must be allocated and
scheduled carefully. In terms of disks, we use
algorithms for retrieving/storing data from/to disks
that reduce seek latency time and eliminate rotational
delay, thereby providing high throughput. In terms of
main-memory, we use buffer management schemes that
exploit the sequential access patterns for continuous
media data, thereby resulting in efficient replacement
of buffer pages from the cache.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shi:1997:BSV,
author = "Weifeng Shi and Shahram Ghandeharizadeh",
title = "Buffer sharing in video-on-demand servers",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "13--20",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262396",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a buffer sharing technique that
strikes a balance between the use of disk bandwidth and
memory in order to maximize the performance of a
video-on-demand server. We make the key observation
that the configuration parameters of the system should
be independent of the physical characteristics of the
data (e.g., popularity of a clip). Instead, the
configuration parameters are fixed and our strategy
adjusts itself dynamically at run-time to support a
pattern of access to the video clips.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:1997:ITD,
author = "Leana Golubchik",
title = "On issues and tradeoffs in design of fault tolerant
{VOD} servers",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "21--28",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262397",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent technological advances in digital signal
processing, data compression techniques, and high speed
communication networks have made Video-on-Demand (VOD)
servers feasible. A challenging task in such systems is
servicing multiple clients simultaneously while
satisfying real-time requirements of continuous
delivery of objects at specified rates. To accomplish
these tasks and realize economies of scale associated
with servicing a large user population, a VOD server
requires a large disk subsystem. Although a single disk
is fairly reliable, a large disk farm can have an
unacceptably high probability of disk failure.
Furthermore, due to real-time constraints, the
reliability and availability requirements of VOD
systems are even more stringent than those of
traditional information systems. In this paper we
discuss some of the main issues and tradeoffs
associated with providing fault tolerance in multidisk
VOD systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Muntz:1997:RRT,
author = "Richard Muntz and Jose Renato Santos and Steve
Berson",
title = "{RIO}: a real-time multimedia object server",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "29--35",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A multimedia object server must be ready to handle a
variety of media object types (video, audio, image, 3D
interactive, etc.) as well as non-real-time workloads.
Even when a homogeneous set of object types is
maintained in the store (e.g., all videos), the storage
system workload is generally quite variable due to the
need to provide, for example, VCR functionality,
multiple playout rates, different resolution levels for
the same objects, etc. Attempting to carefully lay out
data and optimally schedule delivery to meet
just-in-time delivery constraints is very difficult in
the face of this heterogeneous workload. Our approach
to the unpredictability of the I/O workload is to
randomize the allocation of disk blocks. This turns all
workloads into the same uniformly random access pattern
and thus leaves a single problem to deal with. The main
disadvantage of this approach is that statistical
variation can result in short-term imbalances in disk
utilization which, in turn, cause large variances in
latencies. Our approach to this problem is to introduce
limited redundancy and asynchronous scheduling for
short term load balancing. This approach is being
implemented in the RIO (Random I/O) multimedia object
server. The RIO multimedia object server provides
applications a guaranteed rate of storage access with
bounded delay even at very high ({\em > 90\%\/}) disk
utilization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Colajanni:1997:ATS,
author = "Michele Colajanni and Philip S. Yu",
title = "Adaptive {TTL} schemes for load balancing of
distributed {Web} servers",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "36--42",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With ever increasing web traffic, a distributed Web
system can provide scalability and flexibility to cope
with growing client demands. Load balancing algorithms
to spread the load across multiple Web servers are
crucial to achieving scalability. Various {\em domain
name server\/} (DNS) based schedulers have been
proposed in the literature, mainly for multiple
homogeneous servers. DNS provides (logical) host name
to IP-address mapping (i.e., the server assignment),
but the mapping is not done for each server access.
This is because the address mapping is cached for a
time-to-live (TTL) period to reduce network traffic.
The presence of heterogeneous Web servers not only
increases the complexity of the DNS scheduling problem,
but also makes previously proposed algorithms for
homogeneous distributed systems such as round robin not
directly applicable. This leads us to propose new
policies, called {\em adaptive TTL\/} algorithms, that
take both the uneven distribution of client request
rates and heterogeneity of Web servers into account to
adaptively set the TTL value for each address mapping
request. Extensive simulation results show that these
strategies are effective in balancing load among
geographically distributed heterogeneous Web servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kotz:1997:SIP,
author = "David Kotz",
title = "Special Issue on Parallel {I/O} Systems",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "2--2",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.581191",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cormen:1997:CFP,
author = "Thomas H. Cormen and David M. Nicol",
title = "Out-of-core {FFTs} with parallel disks",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "3--12",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We examine approaches to computing the Fast Fourier
Transform (FFT) when the data size exceeds the size of
main memory. Analytical and experimental evidence shows
that relying on native virtual memory with demand
paging can yield extremely poor performance. We then
present approaches based on minimizing I/O costs with
the Parallel Disk Model (PDM). Each of these approaches
explicitly plans and performs disk accesses so as to
minimize their number.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papadopouli:1997:SVV,
author = "Maria Papadopouli and Leana Golubchik",
title = "Support of {VBR} video streams under disk bandwidth
limitations",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "13--20",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present scheduling techniques for a {\em
scalable\/} video server in a multi-disk environment.
The scheduling of the retrieval is introduced in a
dynamic rate-distortion context that exploits both the
multiresolution property of video and replication
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bordawekar:1997:EEH,
author = "Rajesh Bordawekar and Steven Landherr and Don Capps
and Mark Davis",
title = "Experimental evaluation of the {Hewlett--Packard}
{Exemplar} file system",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "21--28",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This article presents results from an experimental
evaluation study of the HP Exemplar file system. Our
experiments consist of simple micro-benchmarks that
study the impact of various factors on the file system
performance. These factors include I/O request/buffer
sizes, vectored/non-vectored access patterns,
read-ahead policies, multi-threaded (temporally
irregular) requests, and architectural issues (cache
parameters, NUMA behavior, etc.). Experimental results
indicate that the Exemplar file system provides high
I/O bandwidth, both for single- and multi-threaded
applications. The buffer cache, with prioritized buffer
management and large buffer sizes, is effective in
exploiting temporal and spatial access localities. The
performance of non-contiguous accesses can be improved
by either using vectored I/O interfaces or tuning the
read-ahead facilities. The file system performance
depends on the relative locations of the computing
threads and the file system, and also on various
Exemplar design parameters such as the NUMA
architecture, TLB/data cache management and paging
policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rochberg:1997:PNE,
author = "David Rochberg and Garth Gibson",
title = "Prefetching over a network: early experience with
{CTIP}",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "29--36",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We discuss CTIP, an implementation of a network
filesystem extension of the successful TIP informed
prefetching and cache management system. Using a
modified version of TIP in NFS client machines (and
unmodified NFS servers), CTIP takes advantage of
application-supplied hints that disclose the
application's future read accesses. CTIP uses these
hints to aggressively prefetch file data from an NFS
file server and to make better local cache replacement
decisions. This prefetching hides disk latency and
exposes storage parallelism. Preliminary measurements
show that CTIP can reduce execution time by a ratio
comparable to that obtained with local TIP over a suite
of I/O-intensive hinting applications. (For four disks,
the reductions in execution time range from 17\% to
69\%). If local TIP execution requires that data first
be loaded from remote storage into a local scratch
area, then CTIP execution is significantly faster than
the aggregate time for loading the data and executing.
Additionally, our measurements show that the benefit of
CTIP for hinting applications improves in the face of
competition from other clients for server resources. We
conclude with an analysis of the remaining problems
with using unmodified NFS servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menon:1997:DVD,
author = "Jai Menon and Kent Treiber",
title = "{Daisy}: virtual-disk hierarchical storage manager",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "37--44",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270908",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicol:1998:SIT,
author = "David M. Nicol",
title = "Special Issue on the {Telecommunications Description
Language}",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "3--3",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.581192",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perumalla:1998:TLM,
author = "Kalyan Perumalla and Richard Fujimoto and Andrew
Ogielski",
title = "{TED} --- a language for modeling telecommunication
networks",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "4--11",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274085",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TeD is a language designed mainly for modeling
telecommunication networks. The TeD language
specification is separated into two parts --- (1) a
{\em meta\/} language and (2) an {\em external\/} language.
The meta language specification is concerned with the
high-level description of the structural and behavioral
interfaces of various network elements. The external
language specification is concerned with the detailed
low-level description of the implementation of the
structure and behavior of the network elements. In this
document, we present an introduction to the TeD
language, along with a brief tutorial using an example
model of a simple ATM multiplexer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perumalla:1998:TMA,
author = "Kalyan Perumalla and Matthew Andrews and Sandeep
Bhatt",
title = "{TED} models for {ATM} internetworks",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "12--21",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274086",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe our experiences designing and implementing
a virtual PNNI network testbed. The network elements
and signaling protocols modeled are consistent with the
ATM Forum {\em PNNI\/} draft specifications. The models
will serve as a high-fidelity testbed of the transport
and network layers for simulation-based studies of the
scalability and performance of PNNI protocols. Our
models are written in the new network description
language
TeD, which offers two advantages. First, the testbed
design is transparent; the model descriptions are
developed separately from, and are independent of, the
simulation-specific code. Second, TeD is compiled to
run with the GTW (Georgia Tech Time Warp) simulation
engine which is supported on shared-memory
multiprocessors. Therefore, we directly obtain the
advantages of parallel simulation. This is one of the
first complex tests of the TeD modeling and simulation
software system. The feedback from our experiences
resulted in some significant improvements to the
simulation software. The resulting {\em PNNI\/} models
are truly transparent and the performance of the
simulations is encouraging. We give results from
preliminary simulations of call admission, set-up and
tear-down in sample {\em PNNI\/} networks consisting of
two hundred nodes and over three hundred edges. The
time to simulate ten thousand call requests decreases
significantly with the number of processors; we observe
a speedup factor of 5.05 when 8 processors are employed
compared to a single processor. Our initial
implementations demonstrate the advantages of TeD for
parallel simulations of large-scale networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rubenstein:1998:OPS,
author = "Dan Rubenstein and Jim Kurose and Don Towsley",
title = "Optimistic parallel simulation of reliable multicast
protocols",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "22--29",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274087",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel discrete-event simulation offers the promise
of harnessing the computational power of multiple
processors in order to reduce the time needed for
simulation-based performance studies. In this paper, we
investigate the use of {\em optimistic parallel
simulation techniques\/} in simulating reliable
multicast communication network protocols. Through
empirical studies (using the TeD simulation programming
language, the Georgia Tech time warp simulator, and a
12-processor SGI Challenge), we find that these
parallelized simulations can run noticeably faster than
a uniprocessor simulation and, in a number of cases,
can make effective use of parallel resources. These
results are somewhat surprising because reliable
multicast protocols require considerable communication
(and hence synchronization) among different network
entities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Panchal:1998:PSW,
author = "Jignesh Panchal and Owen Kelly and Jie Lai and Narayan
Mandayam and Andarew T. Ogielski and Roy Yates",
title = "Parallel simulations of wireless networks with {TED}:
radio propagation, mobility and protocols",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "30--39",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe the
TeD/C++ implementation of {\em WiPPET}, a parallel
simulation testbed for mobile wireless networks. In
this article we emphasize the techniques for modeling
radio propagation (long- and short-scale fading and
interference) and protocols for integrated radio
resource management in mobile wireless voice networks.
The testbed includes the standards-based AMPS, NA-TDMA
and GSM protocols, and several research-oriented
protocol families.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Premore:1998:TNT,
author = "Brian J. Premore and David M. Nicol",
title = "Transformation of {\em ns\/} {TCP} models to {TED}",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "40--48",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274089",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers problems that arise when
transforming TCP models developed using the {\em ns\/}
simulator, to the TeD meta-language. The raison
d'{\^e}tre for this project is to evaluate the
potential of TeD as the target of an automated
simulation model transformation system, so as to
exploit the considerable existing modeling work that
has already been conducted using {\em ns}. By
transforming {\em ns\/} models to TeD we hope to
provide high-performance parallel simulation for
detailed and accurate network models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Srinivasan:1998:FIL,
author = "V. Srinivasan and George Varghese",
title = "Faster {IP} lookups using controlled prefix
expansion",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "1--10",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277863",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Internet (IP) address lookup is a major bottleneck in
high performance routers. IP address lookup is
challenging because it requires {\em a longest matching
prefix\/} lookup. It is compounded by increasing
routing table sizes, increased traffic, higher speed
links, and the migration to 128-bit IPv6 addresses. We
describe how IP lookups can be made faster using a new
technique called {\em controlled prefix expansion}.
Controlled prefix expansion, together with optimization
techniques based on dynamic programming, can be used to
improve the speed of the best known IP lookup
algorithms by at least a factor of two. When applied to
trie search, our techniques provide a range of
algorithms whose performance can be tuned. For example,
with 1 MB of L2 cache, trie search of the MaeEast
database with 38,000 prefixes can be done in a
worst-case search time of 181 nsec, a worst-case
insert/delete time of 2.5 msec, and an average
insert/delete time of 4 usec. Our actual experiments
used 512 KB L2 cache to obtain a worst-case search time
of 226 nsec, a worst-case insert/delete time
of 2.5 msec and an average insert/delete time of 4
usec. We also describe how our techniques can be used
to improve the speed of binary search on prefix lengths
to provide a scalable solution for IPv6. Our approach
to algorithm design is based on measurements using the
VTune tool on a Pentium to obtain dynamic clock cycle
counts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Paxson:1998:CMP,
author = "Vern Paxson",
title = "On calibrating measurements of packet transit times",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "11--21",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We discuss the problem of detecting errors in
measurements of the total delay experienced by packets
transmitted through a wide-area network. We assume that
we have measurements of the transmission times of a
group of packets sent from an originating host, $A$,
and a corresponding set of measurements of their
arrival times at their destination host, $B$,
recorded by two separate clocks. We also assume that we
have a similar series of measurements of packets sent
from $B$ to $A$ (as might occur when recording a TCP
connection), but we do not assume that the clock at $A$
is synchronized with the clock at $B$, nor that
they run at the same frequency. We develop robust
algorithms for detecting abrupt adjustments to either
clock, and for estimating the relative skew between the
clocks. By analyzing a large set of measurements of
Internet TCP connections, we find that both clock
adjustments and relative skew are sufficiently common
that failing to detect them can lead to potentially
large errors when analyzing packet transit times. We
further find that synchronizing clocks using a network
time protocol such as NTP does not free them from such
errors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:1998:MCP,
author = "Randolph Y. Wang and Arvind Krishnamurthy and Richard
P. Martin and Thomas E. Anderson and David E. Culler",
title = "Modeling communication pipeline latency",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "22--32",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study how to minimize the latency of
a message through a network that consists of a number
of store-and-forward stages. This research is
especially relevant for today's low overhead
communication systems that employ dedicated processing
elements for protocol processing. We develop an
abstract pipeline model that reveals a crucial
performance tradeoff involving the effects of the
overhead of the bottleneck stage and the bandwidth of
the remaining stages. We exploit this tradeoff to
develop a suite of fragmentation algorithms designed to
minimize message latency. We also provide an
experimental methodology that enables the construction
of customized pipeline algorithms that can adapt to the
specific system characteristics and application
workloads. By applying this methodology to the
Myrinet-GAM system, we have improved its latency by up
to 51\%. Our theoretical framework is also applicable
to pipelined systems beyond the context of high speed
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Voelker:1998:ICP,
author = "Geoffrey M. Voelker and Eric J. Anderson and Tracy
Kimbrel and Michael J. Feeley and Jeffrey S. Chase and
Anna R. Karlin and Henry M. Levy",
title = "Implementing cooperative prefetching and caching in a
globally-managed memory system",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "33--43",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents {\em cooperative prefetching and
caching\/} --- the use of network-wide global resources
(memories, CPUs, and disks) to support prefetching and
caching in the presence of hints of future demands.
Cooperative prefetching and caching effectively unites
disk-latency reduction techniques from three lines of
research: prefetching algorithms, cluster-wide memory
management, and parallel I/O. When used together, these
techniques greatly increase the power of prefetching
relative to a conventional (non-global-memory) system.
We have designed and implemented PGMS, a cooperative
prefetching and caching system, under the Digital Unix
operating system running on a 1.28 Gb/sec
Myrinet-connected cluster of DEC Alpha workstations.
Our measurements and analysis show that by using
available global resources, cooperative prefetching can
obtain significant speedups for I/O-bound programs. For
example, for a graphics rendering application, our
system achieves a speedup of 4.9 over a non-prefetching
version of the same program, and a 3.1-fold improvement
over that program using local-disk prefetching alone.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shenoy:1998:CDS,
author = "Prashant J. Shenoy and Harrick M. Vin",
title = "{Cello}: a disk scheduling framework for next
generation operating systems",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "44--55",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277871",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present the Cello disk scheduling
framework for meeting the diverse service requirements
of applications. Cello employs a two-level disk
scheduling architecture, consisting of a
class-independent scheduler and a set of class-specific
schedulers. The two levels of the framework allocate
disk bandwidth at two time-scales: the
class-independent scheduler governs the coarse-grain
allocation of bandwidth to application classes, while
the class-specific schedulers control the fine-grain
interleaving of requests. The two levels of the
architecture separate application-independent
mechanisms from application-specific scheduling
policies, and thereby facilitate the co-existence of
multiple class-specific schedulers. We demonstrate that
Cello is suitable for next generation operating systems
since: (i) it aligns the service provided with the
application requirements, (ii) it protects application
classes from one another, (iii) it is work-conserving
and can adapt to changes in workload, (iv) it
minimizes the seek time and rotational latency overhead
incurred during access, and (v) it is computationally
efficient.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rosti:1998:IPB,
author = "Emilia Rosti and Giuseppe Serazzi and Evgenia Smirni
and Mark S. Squillante",
title = "The impact of {I/O} on program behavior and parallel
scheduling",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "56--65",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277873",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we systematically examine various
performance issues involved in the coordinated
allocation of processor and disk resources in
large-scale parallel computer systems. Models are
formulated to investigate the I/O and computation
behavior of parallel programs and workloads, and to
analyze parallel scheduling policies under such
workloads. These models are parameterized by
measurements of parallel programs, and they are solved
via analytic methods and simulation. Our results
provide important insights into the performance of
parallel applications and resource management
strategies when I/O demands are not negligible.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bajaj:1998:SPU,
author = "Sandeep Bajaj and Lee Breslau and Scott Shenker",
title = "Is service priority useful in networks?",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "66--77",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277875",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A key question in the definition of new services for
the Internet is whether to provide a single class of
relaxed real-time service or multiple levels
differentiated by their delay characteristics. In that
context we pose the question: is service priority
useful in networks? We argue that, contrary to some of
our earlier work, to properly address this question one
cannot just consider raw network-centric performance
numbers, such as the delay distribution. Rather, one
must incorporate two new elements into the analysis:
the utility functions of the applications (how
application performance depends on network service),
and the adaptive nature of applications (how
applications react to changing network service). This
last point is especially crucial; modern Internet
applications are designed to tolerate a wide range of
network service quality, and they do so by adapting to
the current network conditions. Most previous
investigations of network performance have neglected to
include this adaptive behavior. In this paper we
present an analysis of service priority in the context
of audio applications embodying these two elements:
utility functions and adaptation. Our investigation is
far from conclusive. The definitive answer to the
question depends on many factors that are outside the
scope of this paper and are, at present, unknowable,
such as the burstiness of future Internet traffic and
the relative offered loads of best-effort and real-time
applications. Despite these shortcomings, our analysis
illustrates this new approach to evaluating network
design decisions, and sheds some light on the
properties of adaptive applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kalampoukas:1998:ITT,
author = "Lampros Kalampoukas and Anujan Varma and K. K.
Ramakrishnan",
title = "Improving {TCP} throughput over two-way asymmetric
links: analysis and solutions",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "78--89",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277877",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The sharing of a common buffer by TCP data segments
and acknowledgments in a network or internet has been
known to produce the effect of {\em ack compression},
often causing dramatic reductions in throughput. We
study several schemes for improving the performance of
two-way TCP traffic over asymmetric links where the
bandwidths in the two directions may differ
substantially, possibly by many orders of magnitude.
These approaches reduce the effect of ack compression
by carefully controlling the flow of data packets and
acknowledgments. We first examine a scheme where
acknowledgments are transmitted at a higher priority
than data. By analysis and simulation, we show that
prioritizing acks can lead to starvation of the
low-bandwidth connection. Next, we introduce and
analyze a connection-level backpressure mechanism
designed to limit the maximum amount of data buffered
in the outgoing IP queue of the source of the
low-bandwidth connection. We show that this approach,
while minimizing the queueing delay for acks, results
in unfair bandwidth allocation on the slow link.
Finally, our preferred solution separates the acks from
data packets in the outgoing queue, and makes use of a
connection-level bandwidth allocation mechanism to
control their bandwidth shares. We show that this
scheme overcomes the limitations of the previous
approaches, provides isolation, and enables precise
control of the connection throughputs. We present
analytical models of the dynamic behavior of each of
these approaches, derive closed-form expressions for
the expected connection efficiencies in each case, and
validate them with simulation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raman:1998:ABG,
author = "Suchitra Raman and Steven McCanne and Scott Shenker",
title = "Asymptotic behavior of global recovery in {SRM}",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "90--99",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277880",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The development and deployment of a large-scale,
wide-area multicast infrastructure in the Internet has
enabled a new family of multi-party, collaborative
applications. Several of these applications, such as
multimedia slide shows, shared whiteboards, and
large-scale multi-player games, require {\em
reliable\/} multicast transport, yet the underlying
multicast infrastructure provides only a best-effort
delivery service. A difficult challenge in the design
of efficient protocols that provide reliable service on
top of the best-effort multicast service is to maintain
acceptable performance as the protocol {\em scales\/}
to very large session sizes distributed across the wide
area. The Scalable, Reliable Multicast (SRM) protocol
[6] is a receiver-driven scheme based on negative
acknowledgments (NACKs) that uses randomized timers
to limit the amount of
protocol overhead in the face of large multicast
groups, but the behavior of SRM at extremely large
scales is not well-understood. In this paper, we use
analysis and simulation to investigate the scaling
behavior of global loss recovery in SRM. We study the
protocol's control-traffic overhead as a function of
group size for various topologies and protocol
parameters, on a set of simple, representative
topologies --- the cone (a variant of a clique), the
linear chain, and the binary tree. We find that this
overhead, as a function of group size, depends strongly
on the topology: for the cone, it is always linear; for
the chain, it is between constant and logarithmic; and
for the tree, it is between constant and linear.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boxma:1998:BPF,
author = "O. J. Boxma and V. Dumas",
title = "The busy period in the fluid queue",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "100--110",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277881",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consider a fluid queue fed by $N$ on/off sources. It
is assumed that the silence periods of the sources are
exponentially distributed, whereas the activity periods
are generally distributed. The inflow rate of each
source, when active, is at least as large as the
outflow rate of the buffer. We make two contributions
to the performance analysis of this model. Firstly, we
determine the Laplace--Stieltjes transforms of the
distributions of the busy periods that start with an
active period of source $ i, i = 1, \ldots {}, N$, as
the unique solution in $ [0, 1]^N$ of a set of $N$
equations. Thus we also find the Laplace--Stieltjes
transform of the distribution of an arbitrary busy
period. Secondly, we relate the tail behaviour of the
busy period distributions to the tail behaviour of the
activity period distributions. We show that the tails
of all busy period distributions are regularly varying
of index $ - \nu $ iff the heaviest of the tails of the
activity period distributions are regularly varying of
index $ - \nu $. We provide explicit equivalents of the
former in terms of the latter, which show that the
contribution of the sources with lighter associated
tails is equivalent to a simple reduction of the
outflow rate. These results have implications for the
performance analysis of networks of fluid queues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:1998:TLP,
author = "Guang-Liang Li and Jun-Hong Cui and Bo Li and
Fang-Ming Li",
title = "Transient loss performance of a class of finite buffer
queueing systems",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "111--120",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277884",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance-oriented studies typically rely on the
assumption that the stochastic process modeling the
phenomenon of interest is already in steady state. This
assumption is, however, not valid if the life cycle of
the phenomenon under study is not long enough, since
usually a stochastic process cannot reach steady state
unless time evolves towards infinity. Therefore, it is
important to address performance issues in transient
state. Previous work in transient analysis of queueing
systems usually focuses on Markov models. This paper,
in contrast, presents an analysis of transient loss
performance for a class of finite buffer queueing
systems that are not necessarily Markovian. We obtain
closed-form transient loss performance measures. Based
on the loss measures, we compare transient loss
performance against steady-state loss performance and
examine how different assumptions on the arrival
process will affect transient loss behavior of the
queueing system. We also discuss how to guarantee
transient loss performance. The analysis is illustrated
with numerical results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "queueing systems; stochastic modeling; transient loss
performance",
}
@Article{McKinnon:1998:QBA,
author = "Martin W. McKinnon and George N. Rouskas and Harry G.
Perros",
title = "Queueing-based analysis of broadcast optical
networks",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "121--130",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277888",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider broadcast WDM networks operating with
schedules that mask the transceiver tuning latency. We
develop and analyze a queueing model of the network in
order to obtain the queue-length distribution and the
packet loss probability at the transmitting and
receiving side of the nodes. The analysis is carried
out assuming finite buffer sizes, non-uniform
destination probabilities and two-state MMBP traffic
sources; the latter naturally capture the notion of
burstiness and correlation, two important
characteristics of traffic in high-speed networks. We
present results which establish that the performance of
the network is a complex function of a number of system
parameters, including the load balancing and scheduling
algorithms, the number of available channels, and the
buffer capacity. We also show that the behavior of the
network in terms of packet loss probability as these
parameters are varied cannot be predicted without an
accurate analysis. Our work makes it possible to study
the interactions among the system parameters, and to
predict, explain and fine tune the performance of the
network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "discrete-time queueing networks; Markov modulated
Bernoulli process; optical networks; wavelength
division multiplexing",
}
@Article{Bavier:1998:PME,
author = "Andy C. Bavier and A. Brady Montz and Larry L.
Peterson",
title = "Predicting {MPEG} execution times",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "131--140",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277851.277892",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports on a set of experiments that
measure the amount of CPU processing needed to decode
MPEG-compressed video in software. These experiments
were designed to discover indicators that could be used
to predict how many cycles are required to decode a
given frame. Such predictors can be used to do more
accurate CPU scheduling. We found that by considering
both frame type and size, it is possible to construct a
linear model of MPEG decoding with $ R^2 $ values of
0.97 and higher. Moreover, this model can be used to
predict decoding times at both the frame and packet
level that are almost always accurate to within 25\% of
the actual decode times. This is a surprising result
given the large variability in MPEG decoding times, and
suggests that it is feasible to design systems that
make quality of service guarantees for MPEG-encoded
video.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gribble:1998:SSF,
author = "Steven D. Gribble and Gurmeet Singh Manku and Drew
Roselli and Eric A. Brewer and Timothy J. Gibson and
Ethan L. Miller",
title = "Self-similarity in file systems",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "141--150",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We demonstrate that high-level file system events
exhibit self-similar behaviour, but only for short-term
time scales of less than approximately a day. We do so
through the analysis of four sets of traces that span
time scales of milliseconds through months, and that
differ in the trace collection method, the filesystems
being traced, and the chronological times of the
tracing. Two sets of detailed, short-term file system
trace data are analyzed; both are shown to have
self-similar-like behaviour, with consistent Hurst
parameters (a measure of self-similarity) for all file
system traffic as well as individual classes of file
system events. Long-term file system trace data is then
analyzed, and we discover that the traces' high
variability and self-similar behaviour do not persist
across time scales of days, weeks, and months. Using
the short-term trace data, we show that sources of file
system traffic exhibit ON/OFF source behaviour, which
is characterized by bursts of activity of highly
variable length, followed by periods of inactivity of
similarly variable length. This ON/OFF behaviour is used to
motivate a simple technique for synthesizing a stream
of events that exhibit the same self-similar short-term
behaviour as was observed in the file system traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barford:1998:GRW,
author = "Paul Barford and Mark Crovella",
title = "Generating representative {Web} workloads for network
and server performance evaluation",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "151--160",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277897",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One role for workload generation is as a means for
understanding how servers and networks respond to
variation in load. This enables management and capacity
planning based on current and projected usage. This
paper applies a number of observations of Web server
usage to create a realistic Web workload generation
tool which mimics a set of real users accessing a
server. The tool, called
Surge (Scalable URL Reference Generator), generates
references matching empirical measurements of (1)
server file size distribution; (2) request size
distribution; (3) relative file popularity; (4)
embedded file references; (5) temporal locality of
reference; and (6) idle periods of individual users.
This paper reviews the essential elements required in
the generation of a representative Web workload. It
also addresses the technical challenges to satisfying
this large set of simultaneous constraints on the
properties of the reference stream, the solutions we
adopted, and their associated accuracy. Finally, we
present evidence that Surge exercises servers in a
manner significantly different from other Web server
benchmarks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ji:1998:PMM,
author = "Minwen Ji and Edward W. Felten and Kai Li",
title = "Performance measurements for multithreaded programs",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "161--170",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multithreaded programming is an effective way to
exploit concurrency, but it is difficult to debug and
tune a highly threaded program. This paper describes a
performance tool called Tmon for monitoring, analyzing
and tuning the performance of multithreaded programs.
The performance tool has two novel features: it uses
`thread waiting time' as a measure and constructs
thread waiting graphs to show thread dependencies and
thus performance bottlenecks, and it identifies
`semi-busy-waiting' points where CPU cycles are wasted
in condition checking and context switching. We have
implemented the Tmon tool and, as a case study, we have
used it to measure and tune a heavily threaded file
system. We used four workloads to tune different
aspects of the file system. We were able to improve the
file system bandwidth and throughput significantly. In
one case, we were able to improve the bandwidth by two
orders of magnitude.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:1998:MES,
author = "Dongming Jiang and Jaswinder Pal Singh",
title = "A methodology and an evaluation of the {SGI Origin
2000}",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "171--181",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277851.277902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As hardware-coherent, distributed shared memory (DSM)
multiprocessing becomes popular commercially, it is
important to evaluate modern realizations to understand
how they perform and scale for a range of interesting
applications and to identify the nature of the key
bottlenecks. This paper evaluates the SGI Origin
2000---the machine that perhaps has the most aggressive
communication architecture of the recent cache-coherent
offerings---and, in doing so, articulates a sound
methodology for evaluating real systems. We examine
data access and synchronization microbenchmarks;
speedups for different application classes, problem
sizes and scaling models; detailed interactions and
time breakdowns using performance tools; and the impact
of special hardware support. We find that overall the
Origin appears to deliver on the promise of
cache-coherent shared address space multiprocessing, at
least at the 32-processor scale we examine. The machine
is quite easy to program for performance and has fewer
organizational problems than previous systems we have
examined. However, some important trouble spots are
also identified, especially related to contention that
is apparently caused by engineering decisions to share
resources among processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shriver:1998:ABM,
author = "Elizabeth Shriver and Arif Merchant and John Wilkes",
title = "An analytic behavior model for disk drives with
readahead caches and request reordering",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "182--191",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern disk drives read-ahead data and reorder
incoming requests in a workload-dependent fashion. This
improves their performance, but makes simple analytical
models of them inadequate for performance prediction,
capacity planning, workload balancing, and so on. To
address this problem we have developed a new analytic
model for disk drives that do readahead and request
reordering. We did so by developing performance models
of the disk drive components (queues, caches, and the
disk mechanism) and a workload transformation technique
for composing them. Our model includes the effects of
workload-specific parameters such as request size and
spatial locality. The result is capable of predicting
the behavior of a variety of real-world devices to
within 17\% across a variety of workloads and disk
drives.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fraguela:1998:MSA,
author = "Basilio B. Fraguela and Ram{\'o}n Doallo and Emilio L.
Zapata",
title = "Modeling set associative caches behavior for irregular
computations",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "192--201",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While much work has been devoted to the study of cache
behavior during the execution of codes with regular
access patterns, little attention has been paid to
irregular codes. An important portion of these codes
are scientific applications that handle compressed
sparse matrices. In this work a probabilistic model for
the prediction of the number of misses on a $K$-way
associative cache memory considering sparse matrices
with a uniform or banded distribution is presented. Two
different irregular kernels are considered: the sparse
matrix-vector product and the transposition of a sparse
matrix. The model was validated with simulations on
synthetic uniform matrices and banded matrices from the
Harwell-Boeing collection.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache performance; irregular computation;
probabilistic model; sparse matrix",
}
@Article{Jiang:1998:IRF,
author = "Tianji Jiang and Mostafa H. Ammar and Ellen W.
Zegura",
title = "Inter-receiver fairness: a novel performance measure
for multicast {ABR} sessions",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "202--211",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a multicast ABR service, a connection is typically
restricted to the rate allowed on the bottleneck link
in the distribution tree from the source to the set of
receivers. Because of this, receivers in the connection
can experience {\em inter-receiver unfairness}, when
the preferred operating rates of the receivers are
different. In this paper we explore the issue of
improving the inter-receiver fairness in a multicast
ABR connection by allowing the connection to operate at
a rate higher than what is allowed by the multicast
tree's bottleneck link. Since this can result in cell
loss to some receivers, we operate with the knowledge
of each receiver's application-specific loss tolerance.
The multicast connection rate is not allowed to
increase beyond the point where the cell loss on a path
to a receiver exceeds this receiver's loss tolerance.
Based on these ideas we develop an inter-receiver
fairness measure and a technique for determining the
rate that maximizes this measure. We show possible
switch algorithms that can be used to convey the
parameters needed to compute the function to the
connection's source. In addition we develop a global
network measure that helps us assess the effect of
increasing inter-receiver fairness on the total network
delivered throughput. We also briefly explore improving
inter-receiver fairness through the use of multiple
virtual circuits to carry traffic for a single
multicast session. A set of examples demonstrate the
use of the inter-receiver fairness concept in various
network scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Courcoubetis:1998:AEL,
author = "Costas Courcoubetis and Vasilios A. Siris and George
D. Stamoulis",
title = "Application and evaluation of large deviation
techniques for traffic engineering in broadband
networks",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "212--221",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Accurate yet simple methods for traffic engineering
are important for efficient dimensioning of broadband
networks. The goal of this paper is to apply and
evaluate large deviation techniques for traffic
engineering. In particular, we employ the recently
developed theory of {\em effective bandwidths}, where
the effective bandwidth depends not only on the
statistical characteristics of the traffic stream, but
also on a link's operating point through two
parameters, the {\em space\/} and {\em time\/}
parameters, which are computed using the {\em many
sources asymptotic}. We show that this effective
bandwidth definition can accurately quantify resource
usage. Furthermore, we estimate and interpret values of
the space and time parameters for various mixes of real
                 traffic, demonstrating how these values can be used to
clarify the effects on the link performance of the time
scales of burstiness of the traffic input, of the link
parameters (capacity and buffer), and of traffic
control mechanisms, such as traffic shaping. Our
approach relies on off-line analysis of traffic traces,
the granularity of which is determined by the time
parameter of the link, and our experiments involve a
large set of MPEG-1 compressed video and Internet Wide
Area Network (WAN) traces, as well as modeled voice
traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ATM; broadband networks; effective bandwidths; large
deviations; traffic engineering",
}
@Article{Neidhardt:1998:CRT,
author = "Arnold L. Neidhardt and Jonathan L. Wang",
title = "The concept of relevant time scales and its
application to queuing analysis of self-similar traffic
(or is {Hurst} naughty or nice?)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "222--232",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277923",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent traffic analyses from various packet networks
have shown the existence of long-range dependence in
bursty traffic. In evaluating its impact on queuing
performance, earlier investigations have noted how the
presence of long-range dependence, or a high value of
the Hurst parameter $H$, is often associated with
surprisingly large queue sizes. As a result, a common
impression has been created of expecting queuing
performance to be worse as $H$ increases, but this
impression can be misleading. In fact, there are
examples in which larger values of $H$ are associated
with smaller queues. So the question is how can one
tell whether queuing performance would improve or
degrade as $H$ rises? In this paper, we show that the
relative queuing performance can be assessed by
identifying a couple of time scales. First, in
comparing a high-$H$ process with a low-$H$ process,
there is a unique time scale $ t_m$ at which the
variances of the two processes match (assuming exact,
second-order self similarity for both processes).
Second, there are time scales $ t_{qi}$ that are most
relevant for queuing the arrivals of process $i$. If
both of the queuing scales $ t_{qi}$ exceed the
variance-matching scale $ t_m$, then the high-$H$ queue
is worse; if the queuing scales are smaller, then the
low-$H$ queue is worse. However, no firm prediction can
be made in the remaining case of $ t_m$ falling between
the two queuing scales. Numerical examples are given to
demonstrate our results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arpaci-Dusseau:1998:SII,
author = "Andrea C. Arpaci-Dusseau and David E. Culler and Alan
M. Mainwaring",
title = "Scheduling with implicit information in distributed
systems",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "233--243",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277927",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "{\em Implicit coscheduling\/} is a distributed
algorithm for time-sharing communicating processes in a
cluster of workstations. By observing and reacting to
implicit information, local schedulers in the system
make independent decisions that dynamically coordinate
the scheduling of communicating processes. The
principal mechanism involved is {\em two-phase
spin-blocking\/}: a process waiting for a message
response spins for some amount of time, and then
relinquishes the processor if the response does not
arrive. In this paper, we describe our experience
implementing implicit coscheduling on a cluster of 16
UltraSPARC I workstations; this has led to
contributions in three main areas. First, we more
rigorously analyze the two-phase spin-block algorithm
and show that spin time should be increased when a
process is receiving messages. Second, we present
performance measurements for a wide range of synthetic
benchmarks and for seven Split-C parallel applications.
Finally, we show how implicit coscheduling behaves
under different job layouts and scaling, and discuss
preliminary results for achieving fairness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:1998:SPS,
author = "Thu D. Nguyen and John Zahorjan",
title = "Scheduling policies to support distributed {$3$D}
multimedia applications",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "244--253",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277851.277930",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of scheduling the rendering
component of 3D multimedia applications on a cluster of
workstations connected via a local area network. Our
goal is to meet a periodic real-time constraint. In
abstract terms, the problem we address is how best to
schedule tasks with unpredictable service times on
distinct processing nodes so as to meet a real-time
deadline, given that all communication among nodes
entails some (possibly large) overhead. We consider two
distinct classes of schemes, {\em static}, in which
task reallocations are scheduled to occur at specific
times, and {\em dynamic}, in which reallocations are
triggered by some processor going idle. For both
classes we further examine both {\em global\/}
reassignments, in which all nodes are rescheduled at a
rescheduling moment, and {\em local\/} reassignments,
in which only a subset of the nodes engage in
rescheduling at any one time. We show that global
dynamic policies work best over a range of
parameterizations appropriate to such systems. We
introduce a new policy, Dynamic with Shadowing, that
places a small number of tasks in the schedules of
multiple workstations to reduce the amount of
communication required to complete the schedule. This
policy is shown to dominate the other alternatives
considered over most of the parameter space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moritz:1998:LMN,
author = "Csaba Andras Moritz and Matthew I. Frank",
title = "{LoGPC}: modeling network contention in
message-passing programs",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "254--263",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277933",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In many real applications, for example those with
frequent and irregular communication patterns or those
using large messages, network contention and contention
for message processing resources can be a significant
part of the total execution time. This paper presents a
new cost model, called LoGPC, that extends the LogP [9]
and LogGP [4] models to account for the impact of
network contention and network interface DMA behavior
on the performance of message-passing programs. We
validate LoGPC by analyzing three applications
implemented with Active Messages [11, 18] on the MIT
Alewife multiprocessor. Our analysis shows that network
contention accounts for up to 50\% of the total
execution time. In addition, we show that the impact of
communication locality on the communication costs is at
most a factor of two on Alewife. Finally, we use the
model to identify tradeoffs between synchronous and
asynchronous message passing styles.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barve:1998:MOT,
author = "Rakesh Barve and Elizabeth Shriver and Phillip B.
Gibbons and Bruce K. Hillyer and Yossi Matias and
Jeffrey Scott Vitter",
title = "Modeling and optimizing {I/O} throughput of multiple
disks on a bus (summary)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "264--265",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277936",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For a wide variety of computational tasks, disk I/O
continues to be a serious obstacle to high performance.
The focus of the present paper is on systems that use
multiple disks per SCSI bus. We measured the
performance of concurrent random I/Os, and observed
bus-related phenomena that impair performance. We
describe these phenomena, and present a new I/O
performance model that accurately predicts the average
bandwidth achieved by a heavy workload of random reads
from disks on a SCSI bus. This model, although
relatively simple, predicts performance on several
                 platforms to within 12\% for I/O sizes in the range
                 16--128 KB. We describe a technique to improve the I/O
                 bandwidth by 10--20\% for random-access workloads that
have large I/Os and high concurrency. This technique
increases the percentage of disk head positioning time
that is overlapped with data transfers, and increases
the percentage of transfers that occur at bus
bandwidth, rather than at disk-head bandwidth.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blumofe:1998:PWS,
author = "Robert D. Blumofe and Dionisios Papadopoulos",
title = "The performance of work stealing in multiprogrammed
environments (extended abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "266--267",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277939",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crovella:1998:TAD,
author = "Mark E. Crovella and Mor Harchol-Balter and Cristina
D. Murta",
title = "Task assignment in a distributed system (extended
abstract): improving performance by unbalancing load",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "268--269",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277942",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of task assignment in a
distributed system (such as a distributed Web server)
in which task sizes are drawn from a heavy-tailed
distribution. Many task assignment algorithms are based
on the heuristic that balancing the load at the server
hosts will result in optimal performance. We show this
conventional wisdom is less true when the task size
distribution is heavy-tailed (as is the case for Web
file sizes). We introduce a new task assignment policy,
called Size Interval Task Assignment with Variable Load
(SITA-V). SITA-V purposely operates the server hosts at
different loads, and directs smaller tasks to the
lighter-loaded hosts. The result is that SITA-V
provably decreases the mean task slowdown by
significant factors (up to 1000 or more) where the more
heavy-tailed the workload, the greater the improvement
factor. We evaluate the tradeoff between improvement in
slowdown and increase in waiting time in a system using
SITA-V, and show conditions under which SITA-V
represents a particularly appealing policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Manley:1998:SSS,
author = "Stephen Manley and Margo Seltzer and Michael Courage",
title = "A self-scaling and self-configuring benchmark for
{Web} servers (extended abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "270--271",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277851.277945",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "World Wide Web clients and servers have become some of
the most important applications in our computing base,
and we need realistic and meaningful ways of measuring
their performance. Current server benchmarks do not
capture the wide variation that we see in servers and
are not accurate in their characterization of web
traffic. In this paper, we present a self-configuring,
scalable benchmark that generates a server benchmark
load based on actual server loads. In contrast to other
web benchmarks, our benchmark focuses on request
latency instead of focusing exclusively on throughput
sensitive metrics. We present our new benchmark
hBench:Web, and demonstrate how it accurately models
the load of an actual server. The benchmark can also be
used to assess how continued growth or changes in the
workload will affect future performance. Using existing
                 log histories, we show that these predictions are
sufficiently realistic to provide insight into
tomorrow's Web performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmark; CGI; scaling; self-configuring; World Wide
Web",
}
@Article{Rousskov:1998:PCP,
author = "Alex Rousskov and Valery Soloviev",
title = "On performance of caching proxies (extended
abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "272--273",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277946",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Waldby:1998:TAE,
author = "J. Waldby and U. Madhow and T. V. Lakshman",
title = "Total acknowledgements (extended abstract): a robust
feedback mechanism for end-to-end congestion control",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "274--275",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277947",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "End-to-end data transport protocols have two main
functions: error recovery and congestion control. The
information required by the sender to perform these
functions is provided by acknowledgements (ACKs) from
the receiver. The Internet transport protocol, TCP/IP,
uses cumulative acknowledgements (CACKs), which provide
a robust but minimal mechanism for error recovery which
is inadequate for heterogeneous networks with random
loss. Furthermore, TCP's congestion control mechanism
is based on counting ACKs, and is therefore vulnerable
to loss of ACKs on the reverse path, particularly when
the latter may be slower than the forward path, as in
asymmetric networks. The contributions of this paper
                 are as follows: (a) We show that a simple enhancement of
                 CACK provides sufficient information for end-to-end
                 {\em congestion control}. We term this ACK format total
                 ACKs (TACKs). (b) We devise a novel ACK format that uses
                 TACKs for congestion control, and negative ACKs (NACKs)
                 for efficient error recovery. Typically, the main
                 concern with NACKs is that of robustness to ACK loss,
                 and we address this using an implementation that
                 provides enough redundancy to ensure such
                 robustness. (c) We use the TACK+NACK acknowledgement
format as the basis for a new transport protocol that
provides efficient error recovery and dynamic
congestion control. The protocol provides large
performance gains over TCP in an environment with
random loss, and is robust against loss of ACKs in the
reverse path. In particular, the protocol gives high
                 throughput up to a designed level of random loss,
independent of the bandwidth-delay product. This is in
contrast to TCP, whose throughput deteriorates
drastically if the random loss probability is higher
than the inverse square of the bandwidth-delay
product.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Willis:1998:PCR,
author = "Thomas E. Willis and George B. {Adams III}",
title = "Portable, continuous recording of complete computer
behavior with low overhead (extended abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "276--277",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277948",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Acharya:1998:UIM,
author = "Anurag Acharya and Sanjeev Setia",
title = "Using idle memory for data-intensive computations
(extended abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "278--279",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277949",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aboutabl:1998:TDD,
author = "Mohamed Aboutabl and Ashok Agrawala and Jean-Dominique
Decotignie",
title = "Temporally determinate disk access (extended
abstract): an experimental approach",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "280--281",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277851.277950",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marsan:1998:MGS,
author = "M. Ajmone Marsan and G. Balbo and G. Conte and S.
Donatelli and G. Franceschinis",
title = "Modelling with {Generalized Stochastic Petri Nets}",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "2--2",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.581193",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bause:1998:SPN,
author = "Falko Bause and Pieter S. Kritzinger",
title = "Stochastic {Petri} Nets: An Introduction to the
Theory",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "2--3",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.581194",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lindemann:1998:PMD,
author = "Christoph Lindemann",
title = "Performance Modelling with Deterministic and
Stochastic {Petri} Nets",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "3--3",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.581195",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lindemann:1998:SIS,
author = "Christoph Lindemann",
title = "Special issue on stochastic {Petri} nets",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "4--4",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288201",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buchholz:1998:GHG,
author = "Peter Buchholz and Peter Kemper",
title = "On generating a hierarchy for {GSPN} analysis",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "5--14",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288202",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper examines the (semi) automatic generation of
a hierarchical structure for generalized stochastic
Petri nets (GSPNs). The idea is to partition a GSPN
automatically into a set of components with
asynchronous communication. Net level results obtained
by invariant computation for these subnets are used to
define a macro description of the internal state. This
yields a hierarchical structure which is exploited in
several efficient analysis algorithms. These algorithms
include reachability set/graph generation, structured
numerical analysis techniques and approximation
techniques based on decomposition and aggregation. A
GSPN model of an existing production cell and its
                 digital control is analyzed to demonstrate the usefulness
of the approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "generalized stochastic Petri nets; hierarchical
structure; Kronecker algebra; Markov chain analysis
techniques",
}
@Article{Fricks:1998:ANM,
author = "Ricardo M. Fricks and Antonio Puliafito and Mikl{\'o}s
Telek and Kishor S. Trivedi",
title = "Applications of non-{Markovian} stochastic {Petri}
nets",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "15--27",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288204",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Petri nets represent a powerful paradigm for modeling
parallel and distributed systems. Parallelism and
resource contention can easily be captured and time can
be included for the analysis of system dynamic
behavior. Most popular stochastic Petri nets assume
that all firing times are exponentially distributed.
This is found to be a severe limitation in many
circumstances that require deterministic and generally
distributed firing times. This has led to a
considerable interest in studying non-Markovian models.
In this paper we specifically focus on non-Markovian
Petri nets. The analytical approach through the
solution of the underlying Markov regenerative process
is dealt with and numerical analysis techniques are
discussed. Several examples are presented and solved to
highlight the potentiality of the proposed
approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Markov regenerative processes; numerical analysis;
preemption policies; stochastic Petri nets",
}
@Article{Marsan:1998:MAS,
author = "Marco Ajmone Marsan and Rossano Gaeta",
title = "Modeling {ATM} systems with {GSPNs} and {SWNs}",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "28--37",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288208",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper overviews the work of the authors in the
field of modeling and analysis of Asynchronous Transfer
Mode (ATM) networks using Generalized Stochastic Petri
Nets (GSPN) and a special class of high-level
stochastic Petri nets known as Stochastic Well-formed
Nets (SWN). These formalisms are first shown to be
adequate tools for the development of models of ATM
systems, provided that only one timed transition is
used, together with many immediate transitions. The
only timed transition in the GSPN and SWN models
                 represents the ATM system's cell time, while immediate
                 transitions implement the ATM system's behavior. The
firing time distribution of the only timed transition
is irrelevant for the computation of several
interesting performance indices. The results, as well
as the problems, derived from the analysis of ATM
switches and Local Area Networks (LAN) that adopt the
Available Bit Rate (ABR) service category are
summarized and discussed, providing references to the
works containing the technical details.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ABR; ATM; Gauss switch; GSPN; knockout switch; LAN;
SWN",
}
@Article{Ost:1998:AWM,
author = "Alexander Ost and Boudewijn R. Haverkort",
title = "Analysis of windowing mechanisms with infinite-state
stochastic {Petri} nets",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "38--46",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288212",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a performance evaluation of
windowing mechanisms in world-wide web applications.
Previously, such mechanisms have been studied by means
                 of measurements only; however, given suitable tool
support, we show that such evaluations can also be
performed conveniently using infinite-state stochastic
Petri nets. We briefly present this class of stochastic
Petri nets as well as the approach for solving the
underlying infinite-state Markov chain using
matrix-geometric methods. We then present a model of
the TCP slow-start congestion avoidance mechanism,
subject to a (recently published) typical worldwide web
workload. The model is parameterized using measurement
data for a national connection and an overseas
connection. Our study shows how the maximum congestion
window size, the connection release timeout and the
packet loss probability influence the expected number
of buffered segments at the server, the connection
setup rate and the connection time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; matrix-geometric methods;
stochastic Petri nets; window flow control",
}
@Article{Dujmovic:1998:EES,
author = "Jozo J. Dujmovi{\'c} and Ivo Dujmovi{\'c}",
title = "Evolution and evaluation of {SPEC} benchmarks",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "2--9",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306228",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a method for quantitative evaluation of
SPEC benchmarks. The method is used for the analysis of
three generations of SPEC component-level benchmarks:
SPEC89, SPEC92, and SPEC95. Our approach is suitable
for studying (1) the redundancy between individual
benchmark programs, (2) the size, completeness, density
and granularity of benchmark suites, (3) the
distribution of benchmark programs in a program space,
and (4) benchmark suite design and evolution
strategies. The presented method can be used for
designing a universal benchmark suite as the next
generation of SPEC benchmarks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cao:1998:GEI,
author = "Pei Cao and Sekhar Sarukkai",
title = "{Guest Editors}' Introduction",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "10--10",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.581196",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Caceres:1998:WPC,
author = "Ram{\'o}n C{\'a}ceres and Fred Douglis and Anja
Feldmann and Gideon Glass and Michael Rabinovich",
title = "{Web} proxy caching: the devil is in the details",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "11--15",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306230",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Much work in the analysis of proxy caching has focused
on high-level metrics such as hit rates, and has
approximated actual reference patterns by ignoring
exceptional cases such as connection aborts. Several of
these low-level details have a strong impact on
performance, particularly in heterogeneous bandwidth
environments such as modem pools connected to faster
networks. Trace-driven simulation of the modem pool of
a large ISP suggests that `cookies' dramatically affect
the cachability of resources; wasted bandwidth due to
aborted connections can more than offset the savings
from cached documents; and using a proxy to keep from
repeatedly opening new TCP connections can reduce
latency more than simply caching data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnamurthy:1998:PQE,
author = "Diwakar Krishnamurthy and Jerome Rolia",
title = "Predicting the {QoS} of an electronic commerce server:
those mean percentiles",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "16--22",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306232",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a case study on Quality of Service
(QoS) measures and Service Level Agreements (SLA) for
an electronic commerce server. Electronic commerce
systems typically rely on a combination of an HTTP
server and a database server that may be integrated
with other enterprise information resources. Some
interactions with these systems cause requests for
static HTML pages. Others cause significant amounts of
database processing. Response time percentiles are
well-accepted measures of QoS for such requests. In
this paper we measure the behavior of an electronic
commerce server under several controlled loads and
study response time measures for several workload
abstractions. Response time measures are captured for
individual URLs, groups of functionally related URLs,
and for sequences of URLs. We consider the utility of
these workload abstractions for providing SLA. We also
show that empirical evidence of server behavior in
conjunction with analytic modeling techniques may be
useful to predict the 90-percentile of response times
for sequence based workload classes. The model
predictions could be used to support realtime call
admission algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bangs:1998:BOS,
author = "Gaurav Bangs and Peter Druschel and Jeffrey C. Mogul",
title = "Better operating system features for faster network
servers",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "23--30",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306234",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Widely-used operating systems provide inadequate
support for large-scale Internet server applications.
Their algorithms and interfaces fail to efficiently
support either event-driven or multi-threaded servers.
They provide poor control over the scheduling and
management of machine resources, making it difficult to
provide robust and controlled service. We propose new
UNIX interfaces to improve scalability, and to provide
fine-grained scheduling and resource management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mosberger:1998:HTM,
author = "David Mosberger and Tai Jin",
title = "{\tt httperf} --- a tool for measuring {Web} server
performance",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "31--37",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306235",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes httperf, a tool for measuring web
server performance. It provides a flexible facility for
generating various HTTP workloads and for measuring
server performance. The focus of httperf is not on
implementing one particular benchmark but on providing
a robust, high-performance tool that facilitates the
construction of both micro- and macro-level benchmarks.
The three distinguishing characteristics of httperf are
its robustness, which includes the ability to generate
and sustain server overload, support for the HTTP/1.1
protocol, and its extensibility to new workload
generators and performance measurements. In addition to
reporting on the design and implementation of httperf
this paper also discusses some of the experiences and
insights gained while realizing this tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ward:1998:ISP,
author = "Amy Ward and Peter Glynn and Kathy Richardson",
title = "{Internet} service performance failure detection",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "38--43",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306237",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increasing complexity of computer networks and our
                 increasing dependence on them mean that enforcing
reliability requirements is both more challenging and
more critical. The expansion of network services to
include both traditional interconnect services and
user-oriented services such as the web and email has
guaranteed both the increased complexity of networks
and the increased importance of their performance. The
first step toward increasing reliability is early
detection of network performance failures. Here we
consider the applicability of statistical model
frameworks under the most general assumptions possible.
Using measurements from corporate proxy servers, we
test the framework against real world failures. The
results of these experiments show we can detect
                 failures, but with some tradeoffs. The tension lies
in the warning time: either we miss early warning signs
or we report some false warnings. Finally, we offer
insight into the problem of failure diagnosis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sayal:1998:SAR,
author = "Mehmet Sayal and Yuri Breitbart and Peter Scheuermann
and Radek Vingralek",
title = "Selection algorithms for replicated {Web} servers",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "44--50",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Replication of documents on geographically distributed
servers can improve both performance and reliability of
the Web service. Server selection algorithms allow Web
clients to select one of the replicated servers which
is `close' to them and thereby minimize the response
time of the Web service. Using client proxy server
traces, we compare the effectiveness of several
`proximity' metrics including the number of hops
between the client and server, the ping round trip time
and the HTTP request latency. Based on this analysis,
we design two new algorithms for selection of
replicated servers and compare their performance
against other existing algorithms. We show that the new
server selection algorithms improve the performance of
other existing algorithms on the average by 55\%. In
addition, the new algorithms improve the performance of
the existing non-replicated Web servers on average by
69\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hillingsworth:1999:SSS,
author = "Jeffrey K. Hillingsworth and Barton P. Miller",
title = "Summary of the {SIGMETRICS Symposium on Parallel and
Distributed Processing}",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "2--12",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309749",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sevcik:1999:SIS,
author = "Kenneth C. Sevcik",
title = "Special Issue on Scheduling in Multiprogrammed
Parallel Systems",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "13--13",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.581197",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Downey:1999:EGW,
author = "Allen B. Downey and Dror G. Feitelson",
title = "The elusive goal of workload characterization",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "14--29",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309750",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The study and design of computer systems requires good
models of the workload to which these systems are
subjected. Until recently, the data necessary to build
these models---observations from production
installations---were not available, especially for
parallel computers. Instead, most models were based on
assumptions and mathematical attributes that facilitate
analysis. Recently a number of supercomputer sites have
made accounting data available that make it possible to
build realistic workload models. It is not clear,
however, how to generalize from specific observations
to an abstract model of the workload. This paper
presents observations of workloads from several
parallel supercomputers and discusses modeling issues
that have caused problems for researchers in this
area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Setia:1999:IJM,
author = "Sanjeev Setia and Mark S. Squillante and Vijay K.
Naik",
title = "The impact of job memory requirements on
gang-scheduling performance",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "30--39",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309751",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Almost all previous research on gang-scheduling has
ignored the impact of real job memory requirements on
the performance of the policy. This is despite the fact
that on parallel supercomputers, because of the
problems associated with demand paging, executing jobs
are typically allocated enough memory so that their
{\em entire address space\/} is memory-resident. In
this paper, we examine the impact of job memory
requirements on the performance of gang-scheduling
policies. We first present an analysis of the
memory-usage characteristics of jobs in the production
workload on the Cray T3E at the San Diego Supercomputer
Center. We also characterize the memory usage of some
of the applications that form part of the workload on
the LLNL ASCI supercomputer. Next, we examine the issue
of long-term scheduling on MPPs, i.e., we study
policies for deciding which jobs among a set of
competing jobs should be allocated memory and thus
should be allowed to execute on the processors of the
system. Using trace-driven simulation, we evaluate the
impact of using different long-term scheduling policies
on the overall performance of Distributed Hierarchical
Control (DHC), a gang-scheduling policy that has been
studied extensively in the research literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chan:1999:EPJ,
author = "Yuet-Ning Chan and Sivarama P. Dandamudi and
Shikharesh Majumdar",
title = "Experiences with parallel job scheduling on a
transputer system",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "40--51",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309753",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Both time and space sharing strategies have been
proposed for job scheduling in multiprogrammed parallel
systems. This paper summarizes the major observations
gained from an experimental investigation of these two
partition sharing strategies on a Transputer system. A
number of factors such as the applications and their
software architectures in the multiprogramming mix, the
partition sharing strategy, and the partition size are
varied and the resulting insights into system
performance and scheduling are presented. Space sharing
is observed to produce a superior performance in
comparison to time sharing for a number of
multiprogrammed workloads. Time sharing showed a better
performance for workloads with high variability in
process execution times, and with high rates of
interprocess communication. The relationships between
system performance and a number of workload and system
characteristics are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:1999:IJA,
author = "Mark S. Squillante and David D. Yao and Li Zhang",
title = "The impact of job arrival patterns on parallel
scheduling",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "52--59",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309754",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an initial analysis of the
job arrival patterns from a real parallel computing
system and we develop a class of traffic models to
characterize these arrival patterns. Our analysis of
the job arrival data illustrates traffic patterns that
exhibit heavy-tail behavior and other characteristics
which are quite different from the arrival processes
used in previous studies of parallel scheduling. We
then investigate the impact of these arrival traffic
patterns on the performance of parallel space-sharing
scheduling strategies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dowdy:1999:SIH,
author = "L. W. Dowdy and E. Rosti and G. Serazzi and E.
Smirni",
title = "Scheduling issues in high-performance computing",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "60--69",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309756",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we consider the problem of scheduling
computational resources across a range of
high-performance systems, from tightly coupled parallel
systems to loosely coupled ones like networks of
workstations and geographically dispersed
meta-computing environments. We review the role of
architecture issues in the choice of scheduling
discipline and we present a selected set of policies
that address different aspects of the scheduling
problem. This discussion serves as the motivation for
addressing the success of academic research in
scheduling as well as its common criticisms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ribeiro:1999:SNL,
author = "Vinay J. Ribeiro and Rudolf H. Riedi and Matthew S.
Crouse and Richard G. Baraniuk",
title = "Simulation of {nonGaussian} long-range-dependent
traffic using wavelets",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "1--12",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301475",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhao:1999:BEC,
author = "Wei Zhao and Satish K. Tripathi",
title = "Bandwidth-efficient continuous media streaming through
optimal multiplexing",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "13--22",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301476",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; bandwidth allocation; feasible
region; multimedia streaming; multiplexing;
quality-of-service; temporal smoothing; transmission
scheduling",
}
@Article{Kumar:1999:ESS,
author = "Sanjeev Kumar and Dongming Jiang and Rohit Chandra and
Jaswinder Pal Singh",
title = "Evaluating synchronization on shared address space
multiprocessors: methodology and performance",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "23--34",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301477",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Acharya:1999:AUI,
author = "Anurag Acharya and Sanjeev Setia",
title = "Availability and utility of idle memory in workstation
clusters",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "35--46",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301478",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kaplan:1999:TRV,
author = "Scott F. Kaplan and Yannis Smaragdakis and Paul R.
Wilson",
title = "Trace reduction for virtual memory simulations",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "47--58",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301479",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Douceur:1999:LSS,
author = "John R. Douceur and William J. Bolosky",
title = "A large-scale study of file-system contents",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "59--70",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301480",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical modeling; directory hierarchy; file-system
contents; static data snapshot; workload
characterization",
}
@Article{Martin:1999:NSH,
author = "Richard P. Martin and David E. Culler",
title = "{NFS} sensitivity to high performance networks",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "71--82",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301481",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barve:1999:MOT,
author = "Rakesh Barve and Elizabeth Shriver and Phillip B.
Gibbons and Bruce K. Hillyer and Yossi Matias and
Jeffrey Scott Vitter",
title = "Modeling and optimizing {I/O} throughput of multiple
disks on a bus",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "83--92",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301482",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sethuraman:1999:OSS,
author = "Jay Sethuraman and Mark S. Squillante",
title = "Optimal stochastic scheduling in multiclass parallel
queues",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "93--102",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301483",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varki:1999:MVT,
author = "Elizabeth Varki",
title = "Mean value technique for closed fork-join networks",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "103--112",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301484",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Franaszek:1999:MFS,
author = "Peter A. Franaszek and Philip Heidelberger and Michael
Wazlowski",
title = "On management of free space in compressed memory
systems",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "113--121",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301485",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smaragdakis:1999:ESE,
author = "Yannis Smaragdakis and Scott Kaplan and Paul Wilson",
title = "{EELRU}: simple and effective adaptive page
replacement",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "122--133",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301486",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1999:ESP,
author = "Donghee Lee and Jongmoo Choi and Jong-Hun Kim and Sam
H. Noh and Sang Lyul Min and Yookun Cho and Chong Sang
Kim",
title = "On the existence of a spectrum of policies that
subsumes the least recently used ({LRU}) and least
frequently used ({LFU}) policies",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "134--143",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301487",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ludwig:1999:MLT,
author = "Reiner Ludwig and Bela Rathonyi and Almudena Konrad
and Kimberly Oden and Anthony Joseph",
title = "Multi-layer tracing of {TCP} over a reliable wireless
link",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "144--154",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301488",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "GSM; measurement tools; TCP; wireless",
}
@Article{Anjum:1999:BDT,
author = "Farooq Anjum and Leandros Tassiulas",
title = "On the behavior of different {TCP} algorithms over a
wireless channel with correlated packet losses",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "155--165",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sripanidkulchai:1999:TPV,
author = "Kunwadee Sripanidkulchai and Andy Myers and Hui
Zhang",
title = "A third-party value-added network service approach to
reliable multicast",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "166--177",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fan:1999:WPB,
author = "Li Fan and Pei Cao and Wei Lin and Quinn Jacobson",
title = "{Web} prefetching between low-bandwidth clients and
proxies: potential and performance",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "178--187",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barford:1999:PEH,
author = "Paul Barford and Mark Crovella",
title = "A performance evaluation of hyper text transfer
protocols",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "188--197",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:1999:HRM,
author = "Huican Zhu and Ben Smith and Tao Yang",
title = "Hierarchical resource management for {Web} server
clusters with dynamic content",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "198--199",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liao:1999:AGS,
author = "Cheng Liao and Margaret Martonosi and Douglas W.
Clark",
title = "An adaptive globally-synchronizing clock algorithm and
its implementation on a {Myrinet}-based {PC} cluster",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "200--201",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.302127",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chou:1999:PSD,
author = "ChengFu Chou and Leana Golubchik and John C. S. Lui",
title = "A performance study of dynamic replication techniques
in continuous media servers",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "202--203",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dovrolis:1999:RDS,
author = "Constantinos Dovrolis and Dimitrios Stiliadis",
title = "Relative differentiated services in the {Internet}:
issues and mechanisms",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "204--205",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bartels:1999:PLF,
author = "Gretta Bartels and Anna Karlin and Darrell Anderson
and Jeffrey Chase and Henry Levy and Geoffrey Voelker",
title = "Potentials and limitations of fault-based {Markov}
prefetching for virtual memory pages",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "206--207",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crowley:1999:UTS,
author = "Patrick Crowley and Jean-Loup Baer",
title = "On the use of trace sampling for architectural studies
of desktop applications",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "208--209",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhola:1999:WMH,
author = "Sumeer Bhola and Mustaque Ahamad",
title = "Workload modeling for highly interactive
applications",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "210--211",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Venkitaraman:1999:DEC,
author = "Narayanan Venkitaraman and Tae-eun Kim and Kang-Won
Lee",
title = "Design and evaluation of congestion control algorithms
in the future {Internet}",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "212--213",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301575",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elnozahy:1999:ATC,
author = "E. N. Elnozahy",
title = "Address trace compression through loop detection and
reduction",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "214--215",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "address traces; compression; control flow analysis;
traces",
}
@Article{Nahum:1999:PIW,
author = "Erich Nahum and Tsipora Barzilai and Dilip Kandlur",
title = "Performance issues in {WWW} servers",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "216--217",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301579",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ng:1999:SBE,
author = "T. S. Eugene Ng and Donpaul C. Stephens and Ion Stoica
and Hui Zhang",
title = "Supporting best-effort traffic with fair service
curve",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "218--219",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301580",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Padhye:1999:TFR,
author = "Jitendra Padhye and Jim Kurose and Don Towsley and
Rajeev Koodli",
title = "A {TCP}-friendly rate adjustment protocol for
continuous media flows over best effort networks",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "220--221",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301581",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Downey:1999:UPE,
author = "Allen B. Downey",
title = "Using {\tt pathchar} to estimate {Internet} link
characteristics",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "222--223",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301582",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We evaluate pathchar, a tool that infers the
characteristics of links along an Internet path
(latency, bandwidth, queue delays). Looking at two
example paths, we identify circumstances where {\tt
pathchar} is likely to succeed, and develop techniques
to improve the accuracy of {\tt pathchar}'s estimates
and reduce the time it takes to generate them. The most
successful of these techniques is a form of adaptive
data collection that reduces the number of measurements
{\tt pathchar} needs by more than 90\% for some
links.\par
A full-length version of this paper is available from
\url{http://www.cs.colby.edu/~downey/pathchar}.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hershko:1999:STS,
author = "Yuval Hershko and Daniel Segal and Hadas Shachnai",
title = "Self-tuning synchronization mechanisms in network
operating systems",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "224--225",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301583",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bose:1999:PEV,
author = "Pradip Bose",
title = "Performance evaluation and validation of
microprocessors",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "226--227",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301584",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "performance evaluation; processor design; validation",
}
@Article{Majumdar:1999:CMC,
author = "Shikharesh Majumdar and Dale Streibel and Bruce
Beninger and Brian Carroll and Neveenta Verma and Minru
Liu",
title = "Controlling memory contention on a scalable
multiprocessor-based telephone switch",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "228--229",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301585",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cervetto:1999:MBP,
author = "Eugenio Cervetto",
title = "Model-based performance analysis of an {EDP\slash
ERP}-oriented wide area network",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "230--231",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301586",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "EDP; ERP; performance modeling; performance
prediction; wide-area network",
}
@Article{Ramanathan:1999:VSA,
author = "Srinivas Ramanathan and Edward H. Perry",
title = "The value of a systematic approach to measurement and
analysis: an {ISP} case study",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "232--233",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301587",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Siebert:1999:IPD,
author = "Janet Siebert",
title = "Improving performance of data analysis in data
warehouses: a methodology and case study",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "234--235",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301588",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data analysis; data warehouse; performance; synthetic
join; VLDB",
}
@Article{Williamson:1999:SIN,
author = "Carey Williamson",
title = "Special Issue on Network Traffic Measurements and
Workload Characterization",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "2--2",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Jerkins:1999:MAI,
author = "Judith L. Jerkins and John Monroe and Jonathan L.
Wang",
title = "A measurement analysis of {Internet} traffic over
frame relay",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "3--14",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Epsilon:1999:AII,
author = "Raja Epsilon and Jun Ke and Carey Williamson",
title = "Analysis of {ISP IP\slash ATM} network traffic
measurements",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "15--24",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:1999:WCW,
author = "Martin Arlitt and Rich Friedrich and Tai Jin",
title = "Workload characterization of a {Web} proxy in a cable
modem environment",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "25--36",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discussed the problems encountered and
techniques used in conducting the performance
evaluation of a multi-processor on-line manpower data
collection system. The two main problems were: (1) a
total lack of available software tools, and (2) many
commonly used hardware monitor measures (e.g., CPU
busy, disk seek in progress) were either meaningless or
not available. The main technique used to circumvent
these problems was detailed analysis of one-word
resolution memory maps. Some additional data collection
techniques were (1) time-stamped channel measurements
used to derive some system component utilization
characteristics and (2) manual stopwatch timings used
to identify the system's terminal response times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barford:1999:MWP,
author = "Paul Barford and Mark Crovella",
title = "Measuring {Web} performance in the wide area",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "37--48",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The current status of an implementation of a
methodology relating load, capacity and service for IBM
MVS computer systems is presented. This methodology
encompasses systems whose workloads include batch, time
sharing and transaction processing. The implementation
includes workload classification, mix representation
and analysis, automatic benchmarking, and exhaust point
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:1999:SIW,
author = "Mark S. Squillante",
title = "Special issue on the {Workshop on MAthematical
performance Modeling and Analysis (MAMA '99)}",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "2--2",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340254",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:1999:IPP,
author = "E. G. {Coffman, Jr.} and Ph. Robert and A. L.
Stolyar",
title = "The interval packing process of linear networks",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "3--4",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340263",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Caceres:1999:SII,
author = "R. C{\'a}ceres and N. G. Duffield and J. Horowitz and
F. Lo Presti and D. Towsley",
title = "Statistical inference of internal network loss and
topology",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "5--6",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340293",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of inference from end-to-end multicast
measurements has recently been proposed to find the
internal characteristics in a network. Here we describe
statistically rigorous methods for inferring link loss
rates, and their application to identifying the
underlying multicast topology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Epema:1999:PSS,
author = "D. H. J. Epema and J. F. C. M. de Jongh",
title = "Proportional-share scheduling in single-server and
multiple-server computing systems",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "7--10",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340295",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Proportional Share Scheduling (PSS), which is the
allocation of prespecified fractions of a certain
resource to different classes of customers, has been
studied both in the context of the allocation of
network bandwidth and of processors. Much of this work
has focused on systems with a single scheduler and when
all classes of customers are constantly backlogged. We
study the objectives and performance of PSS policies
for processor scheduling when these conditions do not
hold.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bertsimas:1999:PAM,
author = "Dimitris Bertsimas and David Gamarnik and John N.
Tsitsiklis",
title = "Performance analysis of multiclass queueing networks",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "11--14",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340299",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The subject of this abstract is performance analysis
of multiclass queueing networks. The objective is to
estimate steady-state queue lengths in queueing
networks, assuming a priori that the scheduling policy
implemented brings the system to a steady state, namely
is stable. We propose a very general methodology based
on Lyapunov functions, for the performance analysis of
infinite state Markov chains and apply it specifically
to multiclass exponential type queueing networks. We
use, in particular, linear and piece-wise linear
Lyapunov function to establish certain geometric type
lower and upper bounds on the tail probabilities and
bounds on expectation of the queue lengths. The results
proposed in this paper are the first that establish
geometric type upper and lower bounds on tail
probabilities of queue lengths, for networks of such
generality. The previous results on performance
analysis can in general achieve only numerical bounds
and only on expectation and not the distribution of
queue lengths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Herzog:1999:PAG,
author = "Ulrich Herzog",
title = "Process algebras are getting mature for performance
evaluation?!",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "15--18",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340303",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Designing hardware/software systems in the traditional
way we clearly separate methods for the functional
design and performance evaluation. Beside many merits
the well known insularity-problem is one of the
consequences. Therefore, in system engineering we see a
clear trend towards an integral treatment of both
aspects. We briefly summarize research results obtained
during the last decade by embedding stochastic
processes into process algebras, an advanced concept
for the design of parallel and distributed systems. The
central objective of these Stochastic Process Algebras
is the modular and hierarchical modelling and analysis
of complex systems. A general introduction and related
references from different research groups may be found
in [1, 2].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gyorfi:1999:DFC,
author = "Laszlo Gyorfi and Andras Racz and Ken Duffy and John
T. Lewis and Raymond Russell and Fergal Toomey",
title = "Distribution-free confidence intervals for measurement
of effective bandwidths",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "19--19",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340304",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Juneja:1999:SHT,
author = "Sandeep Juneja and Perwez Shahabuddin",
title = "Simulating heavy tailed processes using delayed hazard
rate twisting (extended abstract)",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "20--22",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340318",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:1999:SBQ,
author = "Zhen Liu and Don Towsley",
title = "Stochastic bounds for queueing systems with multiple
{Markov} modulated sources",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "23--23",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:1999:WTM,
author = "Mark S. Squillante and David D. Yao and Li Zhang",
title = "{Web} traffic modeling and {Web} server performance
analysis",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "24--27",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bradford:1999:ESH,
author = "Jeffrey P. Bradford and Russell Quong",
title = "An empirical study on how program layout affects cache
miss rates",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "28--42",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340326",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cache miss rates are quoted for a specific program,
cache configuration, and input set; the effect of
program layout on the miss rate has largely been
ignored. This paper examines the miss variation, that
is, the variation in the miss rate for instruction and
data caches resulting from randomly generated layouts;
the layouts were generated by changing the order of the
modules on the command line when linking. This analysis
is performed for several cache sizes, line sizes,
set-associativities, input sets, compiler versions, and
optimization levels for five programs in the SPEC92
benchmark suite. Miss rates were observed that varied
from 60\% to 180\% of the mean miss rate. We did not
observe any consistently good layouts across different
parameters; in contrast, several layouts were
consistently bad. Overall, cache line size and input
set has little effect on the miss variation, while
increasing the cache size (i.e. decreasing the miss
rate), decreasing the set-associativity, or increasing
the optimization level increases the miss variation.
For a direct-mapped cache, the results in this paper
call into question the validity of using a single
layout (i) to determine the miss rate of a given
program, (ii) to determine how a given compiler
optimization affects the miss rate, and (iii) to make
architecture design decisions based on the miss rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moore:1999:ECE,
author = "Andrew Moore and Simon Crosby",
title = "An experimental configuration for the evaluation of
{CAC} algorithms",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "43--54",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340327",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Interest in Connection Admission Control (CAC)
algorithms stems from the need for a network user and a
network provider to forge an agreement on the Quality
of Service (QoS) for a new network connection.
Traditional evaluation of CAC algorithms has been
through simulation studies. We present an alternative
approach: an evaluation environment for CAC algorithms
that is based around an experimental test-rig. This
paper presents the architecture of the test-rig and an
evaluation of its performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2000:ECM,
author = "Martin Arlitt and Ludmila Cherkasova and John Dilley
and Rich Friedrich and Tai Jin",
title = "Evaluating content management techniques for {Web}
proxy caches",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "3--11",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346003",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The continued growth of the World-Wide Web and the
emergence of new end-user technologies such as cable
modems necessitate the use of proxy caches to reduce
latency, network traffic and Web server loads. Current
Web proxy caches utilize simple replacement policies to
determine which files to retain in the cache. We
utilize a trace of client requests to a busy Web proxy
in an ISP environment to evaluate the performance of
several existing replacement policies and of two new,
parameterless replacement policies that we introduce in
this paper. Finally, we introduce Virtual Caches, an
approach for improving the performance of the cache for
multiple metrics simultaneously.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Conti:2000:LDA,
author = "Marco Conti and Enrico Gregori and Fabio Panzieri",
title = "Load distribution among replicated {Web} servers:
{QoS}-based approach",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "12--19",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346004",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A dominant factor for the success of an Internet based
Web service is the Quality of Service (QoS) perceived
by its users. The principal QoS attributes these users
perceive include those related to the service
`responsiveness', i.e. the service availability and
timeliness. In this paper, we argue that QoS can be
provided by distributing the processing load among
replicated Web servers, and that these servers can be
geographically distributed across the Internet. In this
context, we discuss strategies for load distribution,
and summarize a number of alternative architectures
that can implement those strategies. The principal
figure of merit we use in order to assess the
effectiveness of the load distribution strategies we
discuss is the response time experienced by the
users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "load distribution; QoS; Web server",
}
@Article{Griwodz:2000:TLP,
author = "Carsten Griwodz and Michael Liepert and Michael Zink
and Ralf Steinmetz",
title = "Tune to {Lambda} patching",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "20--26",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346006",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A recent paper by Hua, Cai and Sheu [7] describes {\em
Patching\/} as a technique for reducing server load in
a true video-on-demand (TVoD) system. It is a scheme
for multicast video transmissions, which outperforms
techniques such as Batching in response time and
Piggybacking in bandwidth savings for titles of medium
popularity, and probably in user satisfaction as well.
It achieves TVoD performance by buffering part of the
requested video in the receiving end-system. In a
further study, the authors give analytical and
simulation details on optimized patching windows under
the assumptions of the Grace and Greedy patching
techniques. In our view, this does not exploit fully
the calculation that was performed in that study. We
state that temporal distance between two multicast
streams for one movie should not be determined by a
client policy or simulation. Rather, it can be
calculated by the server on a per video basis, since
the server is aware of the average request interarrival
time for each video. Since we model the request
arrivals as a Poisson process, which is defined by a
single variable that is historically called $ \lambda
$, we call this variation `$ \lambda $ Patching'.
Furthermore, we present an optimization option
`Multistream Patching' that reduces the server load
further. We accept that some near video-on-demand-like
traffic is generated with additional patch streams, and
achieve additional gains in server load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adaptive; multicast; streaming server; video on
demand",
}
@Article{Menasec:2000:RMP,
author = "Daniel A. Menas{\'e}c and Rodrigo Fonseca and Virgilio
A. F. Almeida and Marco A. Mendes",
title = "Resource management policies for e-commerce servers",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "27--35",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346009",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Quality of service of e-commerce sites has been
usually managed by the allocation of resources such as
processors, disks, and network bandwidth, and by
tracking conventional performance metrics such as
response time, throughput, and availability. However,
the metrics that are of utmost importance to the
management of a Web store are revenue and profits.
Thus, resource management schemes for e-commerce
servers should be geared towards optimizing business
metrics as opposed to conventional performance metrics.
This paper introduces a state transition graph called
Customer Behavior Model Graph (CBMG) to describe a
customer session. It then presents a family of
priority-based resource management policies for
e-commerce servers. Priorities change dynamically as a
function of the state a customer is in and as a
function of the amount of money the customer has
accumulated in his/her shopping cart. A detailed
simulation model was developed to assess the gain of
adaptive policies with respect to policies that are
oblivious to economic considerations. Simulation
results show that the adaptive priority scheme
suggested here can increase, during peak periods,
business-oriented metrics such as revenue/sec by as
much as 43\% over the non priority case.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Minshall:2000:APP,
author = "Greg Minshall and Yasushi Saito and Jeffrey C. Mogul
and Ben Verghese",
title = "Application performance pitfalls and {TCP}'s {Nagle}
algorithm",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "36--44",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346012",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance improvements to networked applications can
have unintended consequences. In a study of the
performance of the Network News Transport Protocol
(NNTP), the initial results suggested it would be
useful to disable TCP's Nagle algorithm for this
application. Doing so significantly improved latencies.
However, closer observation revealed that with the
Nagle algorithm disabled, the application was
transmitting an order of magnitude more packets. We
found that proper application buffer management
significantly improves performance, but that the Nagle
algorithm still slightly increases mean latency. We
suggest that modifying the Nagle algorithm would
eliminate this cost.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Roadknight:2000:FPC,
author = "Chris Roadknight and Ian Marshall and Debbie Vearer",
title = "File popularity characterisation",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "45--50",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346014",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A key determinant of the effectiveness of a web cache
is the locality of the files requested. In the past
this has been difficult to model, as locality appears
to be cache specific. We show that locality can be
characterised with a single parameter, which primarily
varies with the topological position of the cache, and
is largely independent of the culture of the cache
users. Accurate cache models can therefore be built
without any need to consider cultural effects that are
hard to predict.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file popularity; web caches",
}
@Article{Tomlinson:2000:HCI,
author = "Gary Tomlinson and Drew Major and Ron Lee",
title = "High-capacity {Internet} middleware: {Internet}
caching system architectural overview",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "51--56",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346017",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Previous studies measuring the performance of
general-purpose operating systems running large-scale
Internet server applications, such as proxy caches,
have identified design deficiencies that contribute to
lower than expected performance and scalability. This
paper introduces a high-capacity proxy cache service
built upon a specialized operating system designed to
efficiently support large-scale Internet middleware. It
suggests that specialized operating systems can better
meet the needs of these services than can their
general-purpose counterparts. It concludes with the
measured performance and scalability of this proxy
cache service.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vanderMei:2000:DSS,
author = "R. D. van der Mei and W. K. Ehrlich and P. K. Reeser
and J. P. Francisco",
title = "A decision support system for tuning {Web} servers in
distributed object oriented network architectures",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "57--62",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346020",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Web technologies are currently being employed to
provide end user interfaces in diverse computing
environments. The core element of these Web solutions
is a Web server that is based on the Hypertext Transfer
Protocol (HTTP) running over TCP/IP. Web servers are
required to respond to millions of transaction requests
per day at an `acceptable' Quality of Service (QoS)
level with respect to the end-to-end response time and
the server throughput. In many applications, the server
performs significant server-side processing in
distributed, object-oriented (OO) computing
environments. In these applications, a Web server
retrieves a file, parses the file for scripting
language content, interprets the scripting statements
and then executes embedded code, possibly requiring a
TCP connection to a remote application for data
transfer. In this paper, we present an end-to-end model
that addresses this new class of Web servers that
engage in OO computing. We have implemented the model
in a simulation tool. Performance predictions based on
the simulations are shown to match well with
performance observed in a test environment. Therefore,
the model forms an excellent basis for a Decision
Support System for system architects, allowing them to
predict the behavior of systems prior to their
creation, or the behavior of existing systems under new
load scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "architecture; computing; configuration tuning;
Decision Support System; distributed; HTTP; httpd;
object-oriented; performance; Web server; World Wide
Web",
}
@Article{Chu:2000:CES,
author = "Yang-hua Chu and Sanjay G. Rao and Hui Zhang",
title = "A case for end system multicast (keynote address)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "1--12",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339337",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The conventional wisdom has been that IP is the
natural protocol layer for implementing multicast
related functionality. However, ten years after its
initial proposal, IP Multicast is still plagued with
concerns pertaining to scalability, network management,
deployment and support for higher layer functionality
such as error, flow and congestion control. In this
paper, we explore an alternative architecture for small
and sparse groups, where end systems implement all
multicast related functionality including membership
management and packet replication. We call such a
scheme End System Multicast. This shifting of multicast
support from routers to end systems has the potential
to address most problems associated with IP Multicast.
However, the key concern is the performance penalty
associated with such a model. In particular, End System
Multicast introduces duplicate packets on physical
links and incurs larger end-to-end delay than IP
Multicast. In this paper, we study this question in the
context of the Narada protocol. In Narada, end systems
self-organize into an overlay structure using a fully
distributed protocol. In addition, Narada attempts to
optimize the efficiency of the overlay based on
end-to-end measurements. We present details of Narada
and evaluate it using both simulation and Internet
experiments. Preliminary results are encouraging. In
most simulations and Internet experiments, the delay
and bandwidth penalty are low. We believe the potential
benefits of repartitioning multicast functionality
between end systems and routers significantly outweigh
the performance penalty incurred.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Legout:2000:PFC,
author = "A. Legout and E. W. Biersack",
title = "{PLM}: fast convergence for cumulative layered
multicast transmission schemes",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "13--22",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339340",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A major challenge in the Internet is to deliver live
audio/video content with a good quality and to transfer
files to a large number of heterogeneous receivers.
Multicast and cumulative layered transmission are two
mechanisms of interest to accomplish this task
efficiently. However, protocols using these mechanisms
suffer from slow convergence time, lack of
inter-protocol fairness or TCP-fairness, and loss
induced by the join experiments. In this paper we define
and investigate the properties of a new multicast
congestion control protocol (called PLM) for
audio/video and file transfer applications based on a
cumulative layered multicast transmission. A
fundamental contribution of this paper is the
introduction and evaluation of a new and efficient
technique based on packet pair to infer which layers to
join. We evaluated PLM for a large variety of scenarios
and show that it converges fast to the optimal link
utilization, induces no loss to track the available
bandwidth, has inter-protocol fairness and
TCP-fairness, and scales with the number of receivers
and the number of sessions. Moreover, all these
properties hold in self similar and multifractal
environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "capacity inference; congestion control; cumulative
layers; FS-paradigm; multicast; packet pair",
}
@Article{Sahu:2000:ASD,
author = "Sambit Sahu and Philippe Nain and Christophe Diot and
Victor Firoiu and Don Towsley",
title = "On achievable service differentiation with token
bucket marking for {TCP}",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "23--33",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339342",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Differentiated services (diffserv) architecture
has been proposed as a scalable solution for providing
service differentiation among flows without any
per-flow buffer management inside the core of the
network. It has been advocated that it is feasible to
provide service differentiation among a set of flows by
choosing an appropriate ``marking profile'' for each
flow. In this paper, we examine (i) whether it is
possible to provide service differentiation among a set
of TCP flows by choosing appropriate marking profiles
for each flow, (ii) under what circumstances the
marking profiles are able to influence the service that
a TCP flow receives, and, (iii) how to choose a correct
profile to achieve a given service level. We derive a
simple, and yet accurate, analytical model for
determining the achieved rate of a TCP flow when
edge-routers use ``token bucket'' packet marking and
core-routers use active queue management for
preferential packet dropping. From our study, we
observe three important results: (i) the achieved rate
is not proportional to the assured rate, (ii) it is not
always possible to achieve the assured rate and, (iii)
there exist ranges of values of the achieved rate for
which token bucket parameters have no influence. We
find that it is not easy to regulate the service level
achieved by a TCP flow by solely setting the profile
parameters. In addition, we derive conditions that
determine when the bucket size influences the achieved
rate, and we characterize which rates can and cannot be
achieved. Our study provides insight for choosing
appropriate token bucket parameters for the achievable
rates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
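The ``token bucket'' marking analyzed above admits a compact sketch.
The rate and bucket depth are placeholders, not the marking profiles
studied in the paper (Python).

# Illustrative sketch: a token-bucket packet marker of the kind the
# abstract above analyzes. Rate and depth are placeholder values.
import time

class TokenBucketMarker:
    def __init__(self, rate_bytes_per_s, bucket_bytes):
        self.rate = rate_bytes_per_s     # token refill rate (the assured rate)
        self.depth = bucket_bytes        # maximum burst size
        self.tokens = bucket_bytes
        self.last = time.monotonic()

    def mark(self, packet_bytes):
        """Return 'IN' if the packet fits the profile, 'OUT' otherwise."""
        now = time.monotonic()
        self.tokens = min(self.depth, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if packet_bytes <= self.tokens:
            self.tokens -= packet_bytes
            return "IN"    # preferential treatment at core routers
        return "OUT"       # dropped earlier under active queue management

marker = TokenBucketMarker(rate_bytes_per_s=125_000, bucket_bytes=10_000)  # ~1 Mb/s profile
print(marker.mark(1500))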
@Article{Bolosky:2000:FSD,
author = "William J. Bolosky and John R. Douceur and David Ely
and Marvin Theimer",
title = "Feasibility of a serverless distributed file system
deployed on an existing set of desktop {PCs}",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "34--43",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339345",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an architecture for a serverless
distributed file system that does not assume mutual
trust among the client computers. The system provides
security, availability, and reliability by distributing
multiple encrypted replicas of each file among the
client machines. To assess the feasibility of deploying
this system on an existing desktop infrastructure, we
measure and analyze a large set of client machines in a
commercial environment. In particular, we measure and
report results on disk usage and content; file
activity; and machine uptimes, lifetimes, and loads. We
conclude that the measured desktop infrastructure would
passably support our proposed system, providing
availability on the order of one unfilled file request
per user per thousand days.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical modeling; availability; feasibility
analysis; personal computer usage data; reliability;
security; serverless distributed file system
architecture; trust; workload characterization",
}
@Article{Santos:2000:CRD,
author = "Jose Renato Santos and Richard R. Muntz and Berthier
Ribeiro-Neto",
title = "Comparing random data allocation and data striping in
multimedia servers",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "44--55",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339352",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We compare performance of a multimedia storage server
based on a random data allocation layout and block
replication with traditional data striping techniques.
Data striping techniques in multimedia servers are
often designed for restricted workloads, e.g.
sequential access patterns with CBR (constant bit rate)
requirements. On the other hand, a system based on
random data allocation can support virtually any type
of multimedia application, including VBR (variable bit
rate) video or audio, and interactive applications with
unpredictable access patterns, such as 3D interactive
virtual worlds, interactive scientific visualizations,
etc. Surprisingly, our results show that system
performance with random data allocation is competitive
with, and sometimes even better than, that of traditional
data striping techniques, even for the workloads for which
data striping is designed to work best, i.e., streams with
sequential access patterns and CBR requirements. Due to
its superiority in supporting general workloads and
competitive system performance, we believe that random
data allocation will be the scheme of choice for next
generation multimedia servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Griffin:2000:MPM,
author = "John Linwood Griffin and Steven W. Schlosser and
Gregory R. Ganger and David F. Nagle",
title = "Modeling and performance of {MEMS}-based storage
devices",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "56--65",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339354",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "MEMS-based storage devices are seen by many as
promising alternatives to disk drives. Fabricated using
conventional CMOS processes, MEMS-based storage
consists of thousands of small, mechanical probe tips
that access gigabytes of high-density, nonvolatile
magnetic storage. This paper takes a first step towards
understanding the performance characteristics of these
devices by mapping them onto a disk-like metaphor.
Using simulation models based on the mechanics
equations governing the devices' operation, this work
explores how different physical characteristics (e.g.,
actuator forces and per-tip data rates) impact the
design trade-offs and performance of MEMS-based
storage. Overall results indicate that average access
times for MEMS-based storage are 6.5 times faster than
for a modern disk (1.5 ms vs. 9.7 ms). Results from
filesystem and database benchmarks show that this
improvement reduces application I/O stall times by up to
70\%, resulting in overall performance improvements of
3X.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raunak:2000:IPC,
author = "Mohammad S. Raunak and Prashant Shenoy and Pawan Goyal
and Krithi Ramamritham",
title = "Implications of proxy caching for provisioning
networks and servers",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "66--77",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339357",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine the potential benefits of
web proxy caches in improving the effective capacity of
servers and networks. Since networks and servers are
typically provisioned based on a high percentile of the
load, we focus on the effects of proxy caching on the
tail of the load distribution. We find that, unlike
their substantial impact on the average load, proxies
have a diminished impact on the tail of the load
distribution. The exact reduction in the tail and the
corresponding capacity savings depend on the percentile
of the load distribution chosen for provisioning
networks and servers --- the higher the percentile, the
smaller the savings. In particular, compared to over a
50\% reduction in the average load, the savings in
network and server capacity are only 20--35\% for the
99th percentile of the load distribution. We also find
that while proxies can be somewhat useful in smoothing
out some of the burstiness in web workloads, the
resulting workload continues to exhibit
substantial burstiness and a heavy-tailed nature. We
identify large objects with poor locality to be the
limiting factor that diminishes the impact of proxies
on the tail of load distribution. We conclude that,
while proxies are immensely useful to users due to the
reduction in the average response time, they are less
effective in improving the capacities of networks and
servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2000:CWC,
author = "Jiong Yang and Wei Wang and Richard Muntz",
title = "Collaborative {Web} caching based on proxy
affinities",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "78--89",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339360",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the exponential growth of hosts and traffic
workloads on the Internet, collaborative web caching
has been recognized as an efficient solution to
alleviate web page server bottlenecks and reduce
traffic. However, cache discovery, i.e., locating where
a page is cached, is a challenging problem, especially
in the fast growing World Wide Web environment, where
the number of participating proxies can be very large.
In this paper, we propose a new scheme which employs
proxy affinities to maintain a dynamic distributed
collaborative caching infrastructure. Web pages are
partitioned into clusters according to proxy reference
patterns. All proxies which frequently access some
page(s) in the same web page cluster form an
``information group''. When web pages belonging to a
web page cluster are deleted from or added into a
proxy's cache, only proxies in the associated
information group are notified. This scheme can be
shown to greatly reduce the number of messages and
other overhead on individual proxies while maintaining
a high cache hit rate. Finally, we employ trace driven
simulation to evaluate our web caching scheme using
three web access trace logs to verify that our caching
structure can provide significant benefits on real
workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aron:2000:CRM,
author = "Mohit Aron and Peter Druschel and Willy Zwaenepoel",
title = "Cluster reserves: a mechanism for resource management
in cluster-based network servers",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "90--101",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In network (e.g., Web) servers, it is often desirable
to isolate the performance of different classes of
requests from each other. That is, one seeks to achieve
that a certain minimal proportion of server resources
are available for a class of requests, independent of
the load imposed by other requests. Recent work
demonstrates how to achieve this performance isolation
in servers consisting of a single, centralized node;
however, achieving performance isolation in a
distributed, cluster based server remains a
problem. This paper introduces a new abstraction, the
cluster reserve, which represents a resource principal
in a cluster based network server. We present a design
and evaluate a prototype implementation that extends
existing techniques for performance isolation on a
single node server to cluster based servers. In our
design, the dynamic cluster-wide resource management
problem is formulated as a constrained optimization
problem, with the resource allocations on individual
machines as independent variables, and the desired
cluster-wide resource allocations as constraints.
Periodically collected resource usages serve as further
inputs to the problem. Experimental results show that
cluster reserves are effective in providing performance
isolation in cluster based servers. We demonstrate
that, in a number of different scenarios, cluster
reserves are effective in ensuring performance
isolation while enabling high utilization of the server
resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barakat:2000:APS,
author = "Chadi Barakat and Eitan Altman",
title = "Analysis of the phenomenon of several slow start
phases in {TCP} (poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "102--103",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339388",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wong:2000:PGQ,
author = "Wai-Man R. Wong and Richard R. Muntz",
title = "Providing guaranteed quality of service for
interactive visualization applications (poster
session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "104--105",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339389",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2000:IMF,
author = "Xin Wang and C. Yu and Henning Schulzrinne and Paul
Stirpe and Wei Wu",
title = "{IP} multicast fault recovery in {PIM} over {OSPF}
(poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "106--107",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339390",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lety:2000:CBM,
author = "Emmanuel L{\'e}ty and Thierry Turletti and
Fran{\c{c}}ois Baccelli",
title = "{Cell}-based multicast grouping in large-scale virtual
environments (poster session) (extended abstract)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "108--109",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339392",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jin:2000:TLW,
author = "Shudong Jin and Azer Bestavros",
title = "Temporal locality in {Web} request streams (poster
session) (extended abstract): sources, characteristics,
and caching implications",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "110--111",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339393",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schindler:2000:ADD,
author = "Jiri Schindler and Gregory R. Ganger",
title = "Automated disk drive characterization (poster
session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "112--113",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339397",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "DIXtrac is a program that automatically characterizes
the performance of modern disk drives. This extended
abstract overviews the contents of [3], which describes
and validates DIXtrac's algorithms for extracting
accurate values for over 100 performance-critical
parameters in 2-6 minutes without human intervention or
special hardware support. The extracted data includes
detailed layout and geometry information, mechanical
timings, cache management policies, and command
processing overheads. DIXtrac is validated by
configuring a detailed disk simulator with its
extracted parameters; in most cases, the resulting
accuracies match those of the most accurate disk
simulators reported in the literature. To date, DIXtrac
has been successfully used on ten different models from
four different manufacturers. A growing database of
validated disk characteristics is available in DiskSim
[1] format at
http://www.ece.cmu.edu/~ganger/disksim/diskspecs.html.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fang:2000:OSP,
author = "Zhen Fang and Lixin Zhang and John Carter and Sally
McKee and Wilson Hsieh",
title = "Online superpage promotion revisited (poster
session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "114--115",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nikolaidis:2000:ILL,
author = "Ioanis Nikolaidis and Fulu Li and Ailan Hu",
title = "An inherently loss-less and bandwidth-efficient
periodic broadcast scheme for {VBR} video (poster
session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "116--117",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339400",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Koksal:2000:AST,
author = "Can Emre Koksal and Hisham Kassab and Hari
Balakrishnan",
title = "An analysis of short-term fairness in wireless media
access protocols (poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "118--119",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joshi:2000:RDH,
author = "Srinath R. Joshi and Injong Rhee",
title = "{RESCU}: dynamic hybrid packet-loss recovery for video
transmission over the {Internet} (poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "120--121",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339403",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The current Internet is not reliable; packet loss
rates are frequently high and vary over time.
Transmitting high-quality interactive video over the
Internet is challenging because the quality of
compressed video is very susceptible to packet losses.
Loss of packets belonging to a video frame manifests
itself not only in the reduced quality of that frame
but also in the propagation of that distortion to
successive frames. This error propagation problem is
inherent in many motion-based video codecs due to the
interdependence of encoded video frames. This paper
presents a dynamic loss recovery scheme, called RESCU,
to address the error propagation problem. In this new
scheme, picture coding patterns are dynamically adapted
to current network conditions in order to maximize the
effectiveness of hybrid transport level recovery
(employing both forward error correction and
retransmission) in reducing error propagation. Since
RESCU does not introduce any playout delay at the
receiver, it is suitable for interactive video
communication. An experimental study based on actual
Internet transmission traces representing various
network conditions shows that dynamic hybrid RESCU
exhibits better error resilience and incurs much less
bit overhead than existing error recovery techniques
such as NEWPRED and Intra-H.261.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Padmanabhan:2000:CAD,
author = "Venkata N. Padmanabhan and Lili Qiu",
title = "The content and access dynamics of a busy {Web} server
(poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "122--123",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the MSNBC Web site, one of the busiest in the
Internet today. We analyze the dynamics of content
creation and modification as well as client accesses.
Our key findings are (a) files tend to change little
upon modification, (b) a small set of files get
modified repeatedly, (c) file popularity follows a
Zipf-like distribution with an $ \alpha $ much larger
than reported in previous proxy-based studies, and (d)
there is significant temporal stability in file
popularity but not much stability in the domains from
which popular content is accessed. We discuss
implications of these findings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
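For the Zipf-like popularity finding above, the $i$-th most popular
file is requested roughly in proportion to $ i^{-\alpha} $. A minimal
sketch of estimating $ \alpha $ by least squares on a log-log plot,
using synthetic request counts assumed for illustration (Python):

# Illustrative sketch: fit the Zipf-like exponent alpha mentioned in the
# abstract above by least squares on log(count) versus log(rank).
# The request counts are synthetic, for illustration only.
import math

def fit_zipf_alpha(request_counts):
    counts = sorted(request_counts, reverse=True)
    xs = [math.log(rank) for rank in range(1, len(counts) + 1)]
    ys = [math.log(c) for c in counts]
    n = len(xs)
    mean_x, mean_y = sum(xs) / n, sum(ys) / n
    slope = (sum((x - mean_x) * (y - mean_y) for x, y in zip(xs, ys))
             / sum((x - mean_x) ** 2 for x in xs))
    return -slope   # popularity ~ rank^(-alpha), so alpha is minus the slope

print(fit_zipf_alpha([1000, 480, 320, 250, 190, 160, 140, 120, 110, 100]))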
@Article{Altman:2000:TPB,
author = "Eitan Altman and Konstantin Avrachenkov and Chadi
Barakat",
title = "{TCP} in presence of bursty losses",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "124--133",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.350541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Martin:2000:IDR,
author = "Jim Martin and Arne Nilsson and Injong Rhee",
title = "The incremental deployability of {RTT}-based
congestion avoidance for high speed {TCP Internet}
connections",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "134--144",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our research focuses on end-to-end congestion
avoidance algorithms that use round trip time (RTT)
fluctuations as an indicator of the level of network
congestion. The algorithms are referred to as
delay-based congestion avoidance or DCA. Due to the
economics associated with deploying change within an
existing network, we are interested in an incrementally
deployable enhancement to the TCP/Reno protocol. For
instance, TCP/Vegas, a DCA algorithm, has been proposed
as an incremental enhancement. Requiring relatively
minor modifications to a TCP sender, TCP/Vegas has been
shown to increase end-to-end TCP throughput primarily
by avoiding packet loss. We study DCA in today's best
effort Internet where IP switches are subject to
thousands of TCP flows resulting in congestion with
time scales that span orders of magnitude. Our results
suggest that RTT-based congestion avoidance may not be
reliably incrementally deployed in this environment.
Through extensive measurement and simulation, we find
that when TCP/DCA (i.e., a TCP/Reno sender that is
extended with DCA) is deployed over a high speed
Internet path, the flow generally experiences degraded
throughput compared to an unmodified TCP/Reno flow. We
show (1) that the congestion information contained in
RTT samples is not sufficient to predict packet loss
reliably and (2) that the congestion avoidance in
response to delay increase has minimal impact on the
congestion level over the path when the total DCA
traffic at the bottleneck consumes less than 10\% of
the bottleneck bandwidth.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion avoidance; RTT measurement; TCP",
}
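A generic delay-based congestion-avoidance (DCA) rule of the kind
discussed above reduces the window when the smoothed RTT rises well
above the minimum observed RTT. The threshold and backoff factor are
assumed values, not those of TCP/Vegas or the paper's TCP/DCA (Python):

# Illustrative sketch: a generic delay-based congestion-avoidance step.
# Threshold and backoff are assumed values for illustration only.
def dca_adjust(cwnd, srtt, min_rtt, backoff=0.875, threshold=1.2):
    if srtt > threshold * min_rtt:
        return max(1.0, cwnd * backoff)   # congestion inferred from delay: back off
    return cwnd + 1.0 / cwnd              # otherwise grow about one segment per RTT

print(dca_adjust(cwnd=20.0, srtt=120.0, min_rtt=90.0))   # 120 > 1.2 * 90, so back off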
@Article{Rubenstein:2000:DSC,
author = "Dan Rubenstein and Jim Kurose and Don Towsley",
title = "Detecting shared congestion of flows via end-to-end
measurement",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "145--155",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339410",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current Internet congestion control protocols operate
independently on a per-flow basis. Recent work has
demonstrated that cooperative congestion control
strategies between flows can improve performance for a
variety of applications, ranging from aggregated TCP
transmissions to multiple-sender multicast
applications. However, in order for this cooperation to
be effective, one must first identify the flows that
are congested at the same set of resources. In this
paper, we present techniques based on loss or delay
observations at end-hosts to infer whether or not two
flows experiencing congestion are congested at the same
network resources. We validate these techniques via
queueing analysis, simulation, and experimentation
within the Internet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
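One hedged way to illustrate the delay-based test described above is
to compare the cross-correlation of two flows' delay samples against
each flow's own lag-1 autocorrelation; the paper's actual estimators
and decision rules differ, and the data below are synthetic (Python):

# Illustrative sketch: guess whether two flows share a bottleneck by
# comparing the cross-correlation of their delay samples with each
# flow's lag-1 autocorrelation. A crude stand-in for the paper's tests.
import math
import statistics

def correlation(xs, ys):
    mx, my = statistics.fmean(xs), statistics.fmean(ys)
    cov = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
    var_x = sum((x - mx) ** 2 for x in xs)
    var_y = sum((y - my) ** 2 for y in ys)
    return cov / math.sqrt(var_x * var_y)

def likely_shared_bottleneck(delays_a, delays_b):
    cross = correlation(delays_a, delays_b)
    auto = max(correlation(delays_a[:-1], delays_a[1:]),
               correlation(delays_b[:-1], delays_b[1:]))
    return cross > auto

a = [10, 12, 15, 11, 18, 20, 14, 13]        # synthetic delay samples (ms)
b = [11, 13, 16, 12, 19, 21, 15, 14]        # tracks a closely: likely shared
print(likely_shared_bottleneck(a, b))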
@Article{Wang:2000:MAL,
author = "Xin Wang and Henning Schulzrinne and Dilip Kandlur and
Dinesh Verma",
title = "Measurement and analysis of {LDAP} performance",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "156--165",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339412",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cleveland:2000:IPG,
author = "William S. Cleveland and Dong Lin and Don X. Sun",
title = "{IP} packet generation: statistical models for {TCP}
start times based on connection-rate superposition",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "166--177",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339413",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TCP start times for HTTP are nonstationary. The
nonstationarity occurs because the start times on a
link, a point process, are a superposition of source
traffic point processes, and the statistics of
superposition changes as the number of superposed
processes changes. The start time rate is a measure of
the number of traffic sources. The univariate
distribution of the inter-arrival times is
approximately Weibull, and as the rate increases, the
Weibull shape parameter goes to 1, an exponential
distribution. The autocorrelation of the log
inter-arrival times is described by a simple,
two-parameter process: white noise plus a long-range
persistent time series. As the rate increases, the
variance of the persistent series tends to zero, so the
log times tend to white noise. A parsimonious
statistical model for log inter-arrivals accounts for
the autocorrelation, the Weibull distribution, and the
nonstationarity of both with the rate. The model,
whose purpose is to provide stochastic input to a
network simulator, has the desirable property that the
superposition point process is generated as a single
stream. The parameters of the model are functions of
the rate, so to generate start times, only the rate is
specified. As the rate increases, the model tends to a
Poisson process. These results arise from theoretical
and empirical study based on the concept of
connection-rate superposition. The theory is the
mathematics of superposed point processes, and the
empiricism is an analysis of 23 million TCP connections
organized into 10704 blocks of approximately 15 minutes
each.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
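A hedged sketch of generating such start times as a single superposed
stream with Weibull inter-arrivals whose shape tends to 1 as the rate
grows; the rate-to-shape mapping is a made-up placeholder, not the
paper's fitted model, and the long-range dependent component is
omitted (Python):

# Illustrative sketch: TCP start times with Weibull inter-arrivals whose
# shape approaches 1 (exponential) as the rate grows. The rate-to-shape
# mapping is an assumed placeholder, not the paper's parameterization.
import math
import random

def weibull_shape_for_rate(rate_per_s):
    return min(1.0, 0.6 + 0.1 * math.log10(max(rate_per_s, 1.0)))

def generate_start_times(rate_per_s, duration_s, seed=0):
    rng = random.Random(seed)
    shape = weibull_shape_for_rate(rate_per_s)
    # Pick the Weibull scale so the mean inter-arrival equals 1 / rate.
    scale = (1.0 / rate_per_s) / math.gamma(1.0 + 1.0 / shape)
    t, starts = 0.0, []
    while t < duration_s:
        t += rng.weibullvariate(scale, shape)
        starts.append(t)
    return starts

print(len(generate_start_times(rate_per_s=50, duration_s=10)))   # roughly 500 starts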
@Article{Hegde:2000:ISH,
author = "Nidhi Hegde and Khosrow Sohraby",
title = "On the impact of soft hand-off in cellular systems",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "178--187",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339414",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a model for soft, hand-off in wireless
cellular networks. In such networks, due to overlapping
cells, hand-offs are not instantaneous and multiple
channels may be occupied by a single mobile for a
non-zero freeze time period. We provide a mathematical
model of wireless cellular networks with soft
hand-offs. We examine different performance measures
and show that freeze time may have a major impact on
the system performance if the mobility rate is not
negligible. Both exact and approximate formulations are
given. Different fixed-point approximation methods are
used to reduce the complexity of the exact solution.
Various performance measures such as new and hand-off
blocking and probability of a call dropout are
carefully examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shakkottai:2000:DAP,
author = "Sanjay Shakkottai and R. Srikant",
title = "Delay asymptotics for a priority queueing system",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "188--195",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339415",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study discrete-time priority
queueing systems fed by a large number of arrival
streams. We first provide bounds on the actual delay
asymptote in terms of the virtual delay asymptote.
Then, under suitable assumptions on the arrival process
to the queue, we show that these asymptotes are the
same. We then consider a priority queueing system with
two queues. Using the earlier result, we derive an
upper bound on the tail probability of the delay. Under
certain assumptions on the rate function of the arrival
process, we show that the upper bound is tight. We then
consider a system with Markovian arrivals and
numerically evaluate the delay tail probability and
validate these results with simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:2000:FAI,
author = "Leana Golubchik and John C. S. Lui",
title = "A fast and accurate iterative solution of a
multi-class threshold-based queueing system with
hysteresis",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "196--206",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339416",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our main goal in this work is to develop an efficient
method for solving such models and computing the
corresponding performance measures of interest, which
can subsequently be used in evaluating designs of
threshold-based systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Miner:2000:UES,
author = "Andrew S. Miner and Gianfranco Ciardo and Susanna
Donatelli",
title = "Using the exact state space of a {Markov} model to
compute approximate stationary measures",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "207--216",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339417",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a new approximation algorithm based on an
exact representation of the state space $S$, using
decision diagrams, and of the transition rate matrix
$R$, using Kronecker algebra, for a Markov model with
$K$ submodels. Our algorithm builds and solves $K$
Markov chains, each corresponding to a different
aggregation of the exact process, guided by the
structure of the decision diagram, and iterates on
their solution until their entries are stable. We prove
that exact results are obtained if the overall model
has a product-form solution. Advantages of our method
include good accuracy, low memory requirements, fast
execution times, and a high degree of automation, since
the only additional information required to apply it is
a partition of the model into the $K$ submodels. As far
as we know, this is the first time an approximation
algorithm has been proposed where knowledge of the
exact state space is explicitly used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eager:2000:ATH,
author = "Derek L. Eager and Daniel J. Sorin and Mary K.
Vernon",
title = "{AMVA} techniques for high service time variability",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "217--228",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339418",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by experience gained during the validation
of a recent Approximate Mean Value Analysis (AMVA)
model of modern shared memory architectures, this paper
re-examines the ``standard'' AMVA approximation for
non-exponential FCFS queues. We find that this
approximation is often inaccurate for FCFS queues with
high service time variability. For such queues, we
propose and evaluate: (1) AMVA estimates of the mean
residual service time at an arrival instant that are
much more accurate than the standard AMVA estimate, (2)
a new AMVA technique that provides a much more accurate
estimate of mean center residence time than the
standard AMVA estimate, and (3) a new AMVA technique
for computing the mean residence time at a
``downstream'' queue which has a more bursty arrival
process than is assumed in the standard AMVA equations.
Together, these new techniques increase the range of
applications to which AMVA may be fruitfully applied,
so that for example, the memory system architecture of
shared memory systems with complex modern processors
can be analyzed with these computationally efficient
methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
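For context on what AMVA approximates, the standard exact MVA
recursion for a closed, single-class product-form network is sketched
below; the paper's contribution, improved residual-service-time
estimates for high-variability FCFS queues, is not reproduced here
(Python):

# Illustrative sketch: exact Mean Value Analysis for a closed,
# single-class product-form network with queueing centers only.
def exact_mva(service_demands, customers):
    """service_demands: per-center demand D_m; returns (throughput, queue lengths)."""
    queue = [0.0] * len(service_demands)
    throughput = 0.0
    for n in range(1, customers + 1):
        residence = [d * (1.0 + q) for d, q in zip(service_demands, queue)]
        throughput = n / sum(residence)
        queue = [throughput * r for r in residence]
    return throughput, queue

print(exact_mva([0.05, 0.02, 0.08], customers=10))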
@Article{Ofelt:2000:EPP,
author = "David Ofelt and John L. Hennessy",
title = "Efficient performance prediction for modern
microprocessors",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "229--239",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339419",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Generating an accurate estimate of the performance of
a program on a given system is important to a large
number of people. Computer architects, compiler
writers, and developers all need insight into a
machine's performance. There are a number of
performance estimation techniques in use, from
profile-based approaches to full machine simulation.
This paper discusses a profile-based performance
estimation technique that uses a lightweight
instrumentation phase that runs in time on the order of the
number of dynamic instructions, followed by an analysis
phase that runs in roughly the order of the number of static
instructions. This technique accurately predicts the
performance of the core pipeline of a detailed
out-of-order issue processor model while scheduling far
fewer instructions than does full simulation. The
difference between the predicted execution time and the
time obtained from full simulation is only a few
percent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Endo:2000:IIP,
author = "Yasuhiro Endo and Margo Seltzer",
title = "Improving interactive performance using {TIPME}",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "240--251",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339420",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "On the vast majority of today's computers, the
dominant form of computation is GUI-based user
interaction. In such an environment, the user's
perception is the final arbiter of performance.
Human-factors research shows that a user's perception
of performance is affected by unexpectedly long delays.
However, most performance-tuning techniques currently
rely on throughput-sensitive benchmarks. While these
techniques improve the average performance of the
system, they do little to detect or eliminate
response-time variabilities --- in particular,
unexpectedly long delays. We introduce a measurement
infrastructure that allows us to improve user-perceived
performance by helping us to identify and eliminate the
causes of the unexpected long response times that users
find unacceptable. We describe TIPME (The Interactive
Performance Monitoring Environment), a collection of
measurement tools that allowed us to quickly and easily
diagnose interactive performance ``bugs'' in a mature
operating system. We present two case studies that
demonstrate the effectiveness of our measurement
infrastructure. Each of the performance problems we
identify drastically affects variability in response
time in a mature system, demonstrating that current
tuning techniques do not address this class of
performance problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "interactive performance; monitoring",
}
@Article{Farkas:2000:QEC,
author = "Keith I. Farkas and Jason Flinn and Godmar Back and
Dirk Grunwald and Jennifer M. Anderson",
title = "Quantifying the energy consumption of a pocket
computer and a {Java Virtual Machine}",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "252--263",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339421",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine the energy consumption of a
state-of-the-art pocket computer. Using a data
acquisition system, we measure the energy consumption
of the Itsy Pocket Computer, developed by Compaq
Computer Corporation's Palo Alto Research Labs. We
begin by showing that the energy usage characteristics
of the Itsy differ markedly from that of a notebook
computer. Then, since we expect that flexible software
environments will become increasingly prevalent on
pocket computers, we consider applications running in a
Java environment. In particular, we explain some of the
Java design tradeoffs applicable to pocket computers,
and quantify their energy costs. For the design options
we considered and the three workloads we studied, we
find a maximum change in energy use of 25\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2000:MSB,
author = "Jin-Soo Kim and Yarsun Hsu",
title = "Memory system behavior of {Java} programs: methodology
and analysis",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "264--274",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339422",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies the memory system behavior of Java
programs by analyzing memory reference traces of
several SPECjvm98 applications running with a
Just-In-Time (JIT) compiler. Trace information is
collected by an exception-based tracing tool called
JTRACE, without any instrumentation of the Java
programs or the JIT compiler. First, we find that the
overall cache miss ratio is increased due to garbage
collection, which suffers from higher cache misses
compared to the application. We also note that going
beyond 2-way cache associativity improves the cache
miss ratio marginally. Second, we observe that Java
programs generate a substantial amount of short-lived
objects. However, the size of frequently-referenced
long-lived objects is more important to the cache
performance, because it tends to determine the
application's working set size. Finally, we note that
the default heap configuration which starts from a
small initial heap size is very inefficient since it
invokes a garbage collector frequently. Although the
direct costs of garbage collection decrease as we
increase the available heap size, there exists an
optimal heap size which minimizes the total execution
time due to the interaction with the virtual memory
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Karlsson:2000:AMW,
author = "Magnus Karlsson and Per Stenstr{\"o}m",
title = "An analytical model of the working-set sizes in
decision-support systems",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "275--285",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339423",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an analytical model to study how
working sets scale with database size and other
applications parameters in decision-support systems
(DSS). The model uses application parameters, that are
measured on down-scaled database executions, to predict
cache miss ratios for executions of large databases.By
applying the model to two database engines and typical
DSS queries we find that, even for large databases, the
most performance-critical working set is small and is
caused by the instructions and private data that are
required to access a single tuple. Consequently, its
size is not affected by the database size.
Surprisingly, database data may also exhibit temporal
locality but the size of its working set critically
depends on the structure of the query, the method of
scanning, and the size and the content of the
database.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choi:2000:TAF,
author = "Jongmoo Choi and Sam H. Noh and Sang Lyul Min and
Yookun Cho",
title = "Towards application\slash file-level characterization
of block references: a case for fine-grained buffer
management",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "286--295",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339424",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two contributions are made in this paper. First, we
show that system level characterization of file block
references is inadequate for maximizing buffer cache
performance. We show that a finer-grained
characterization approach is needed. Though application
level characterization methods have been proposed, this
is the first attempt, to the best of our knowledge, to
consider file level characterizations. We propose an
Application/File-level Characterization (AFC) scheme
where we detect on-line the reference characteristics
at the application level and then at the file level, if
necessary. The results of this characterization are
used to employ appropriate replacement policies in the
buffer cache to maximize performance. The second
contribution is in proposing an efficient and fair
buffer allocation scheme. Application or file level
resource management is infeasible unless there exists
an allocation scheme that is efficient and fair. We
propose the $ \Delta $ HIT allocation scheme that takes
away a block from the application/file where the
removal results in the smallest reduction in the number
of expected buffer cache hits. Both the AFC and $
\Delta $ HIT schemes are on-line schemes that detect
and allocate as applications execute. Experiments using
trace-driven simulations show that substantial
performance improvements can be made. For single
application executions the hit ratio increased an
average of 13 percentage points compared to the LRU
policy, with a maximum increase of 59 percentage
points, while for multiple application executions, the
increase is an average of 12 percentage points, with a
maximum of 32 percentage points for the workloads
considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
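The victim-selection idea behind the $ \Delta $ HIT scheme above, take
a buffer block from the application or file whose expected hit count
drops the least, can be sketched as follows; the hit-estimate curves
are made-up placeholders, whereas the paper derives them online from
detected reference characteristics (Python):

# Illustrative sketch: pick the partition (application or file) whose
# loss of one buffer block reduces expected cache hits the least.
def pick_victim(partitions, expected_hits):
    """partitions: {name: blocks held}; expected_hits(name, blocks) -> float."""
    def marginal_loss(name):
        blocks = partitions[name]
        return expected_hits(name, blocks) - expected_hits(name, blocks - 1)
    candidates = [name for name, blocks in partitions.items() if blocks > 0]
    return min(candidates, key=marginal_loss)

# Placeholder hit curves: sequential scans gain little per extra block,
# looping references gain a lot.
def demo_hits(name, blocks):
    return {"seq-scan": 0.05, "loop": 0.9, "random": 0.3}[name] * blocks

print(pick_victim({"seq-scan": 40, "loop": 30, "random": 30}, demo_hits))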
@Article{Kodialam:2000:OMR,
author = "Murali S. Kodialam and T. V. Lakshman and Sudipta
Sengupta",
title = "Online multicast routing with bandwidth guarantees: a
new approach using multicast network flow",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "296--306",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339425",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a new algorithm for on-line
routing of bandwidth-guaranteed multicasts where
routing requests arrive one-by-one without there being
any a priori knowledge of future requests. A multicast
routing request consists of a source $s$, a set of
receivers $R$, and a bandwidth requirement $b$. This
multicast routing problem arises in many contexts. Two
applications of interest are routing of
point-to-multipoint label-switched paths in
Multi-Protocol Label Switched (MPLS) networks, and the
provision of bandwidth guaranteed Virtual Private
Network (VPN) services under the ``hose'' service model
[17]. Offline multicast routing algorithms cannot be
used since they require a priori knowledge of all
multicast requests that are to be routed. Instead,
on-line algorithms that handle requests arriving
one-by-one and that satisfy as many potential future
demands as possible are needed. The newly developed
algorithm is an on-line algorithm and is based on the
idea that a newly routed multicast must follow a route
that does not ``interfere too much'' with network paths
that may be critical to satisfy future demands. We
develop a multicast tree selection heuristic that is
based on the idea of deferred loading of certain
``critical'' links. These critical links are identified
by the algorithm as links that, if heavily loaded,
would make it impossible to satisfy future demands
between certain ingress-egress pairs. The presented
algorithm uses link-state information and some
auxiliary capacity information for multicast tree
selection and is amenable to distributed
implementation. Unlike previous algorithms, the
proposed algorithm exploits any available knowledge of
the network ingress-egress points of potential future
demands even though the demands themselves are unknown
and performs very well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multicast routing; network flow; Steiner tree; traffic
engineering",
}
@Article{Gao:2000:SIR,
author = "Lixin Gao and Jennifer Rexford",
title = "Stable {Internet} routing without global
coordination",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "307--317",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339426",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Border Gateway Protocol (BGP) allows an autonomous
system (AS) to apply diverse local policies for
selecting routes and propagating reachability
information to other domains. However, BGP permits ASes
to have conflicting policies that can lead to routing
instability. This paper proposes a set of guidelines
for an AS to follow in setting its routing policies,
without requiring coordination with other ASes. Our
approach exploits the Internet's hierarchical structure
and the commercial relationships between ASes to impose
a partial order on the set of routes to each
destination. The guidelines conform to conventional
traffic-engineering practices of ISPs, and provide each
AS with significant flexibility in selecting its local
policies. Furthermore, the guidelines ensure route
convergence even under changes in the topology and
routing policies. Drawing on a formal model of BGP, we
prove that following our proposed policy guidelines
guarantees route convergence. We also describe how our
methodology can be applied to new types of
relationships between ASes, how to verify the
hierarchical AS relationships, and how to realize our
policy guidelines. Our approach has significant
practical value since it preserves the ability of each
AS to apply complex local policies without divulging
its BGP configurations to others.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Korkmaz:2000:EAF,
author = "Turgay Korkmaz and Marwan Krunz and Spyros Tragoudas",
title = "An efficient algorithm for finding a path subject to
two additive constraints",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "318--327",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339427",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the key issues in providing end-to-end
quality-of-service guarantees in packet networks is how
to determine a feasible route that satisfies a set of
constraints while simultaneously maintaining high
utilization of network resources. In general, finding a
path subject to multiple additive constraints (e.g.,
delay, delay-jitter) is an NP-complete problem that
cannot be exactly solved in polynomial time.
Accordingly, heuristics and approximation algorithms
are often used to address this problem. Previously
proposed algorithms suffer from either excessive
computational cost or low performance. In this paper,
we provide an efficient approximation algorithm for
finding a path subject to two additive constraints. The
worst-case computational complexity of this algorithm
is within a logarithmic number of calls to Dijkstra's
shortest path algorithm. Its average complexity is much
lower than that, as demonstrated by simulation results.
The performance of the proposed algorithm is justified
via theoretical performance bounds. To achieve further
performance improvement, several extensions to the
basic algorithm are also provided at low extra
computational cost. Extensive simulations are used to
demonstrate the high performance of the proposed
algorithm and to contrast it with other path selection
algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multiple constrained path selection; QoS routing;
scalable routing",
}
@Article{Kant:2000:WPA,
author = "Krishna Kant",
title = "{Workshop on Performance and Architecture of Web
Servers (PAWS-2000)}",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "3--4",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.581257",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kant:2000:SIS,
author = "Krishna Kant and Prasant Mohapatra",
title = "Scalable {Internet} servers: issues and challenges",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "5--8",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362891",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandman:2000:CFW,
author = "Onn Brandman and Junghoo Cho and Hector Garcia-Molina
and Narayanan Shivakumar",
title = "Crawler-friendly {Web} servers",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "9--14",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study how to make web servers (e.g.,
Apache) more crawler friendly. Current web servers
offer the same interface to crawlers and regular web
surfers, even though crawlers and surfers have very
different performance requirements. We evaluate simple
and easy-to-incorporate modifications to web servers
that yield significant bandwidth savings.
Specifically, we propose that web servers export
meta-data archives describing their content.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burns:2000:CLD,
author = "Randal C. Burns and Darrell D. E. Long and Robert M.
Rees",
title = "Consistency and locking for distributing updates to
{Web} servers using a file system",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "15--21",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed file systems are often used to replicate a
Web site's content among its many servers. However, for
content that needs to be dynamically updated and
distributed to many servers, file system locking
protocols exhibit high latency and heavy network usage.
Poor performance arises because the Web-serving
workload differs from the assumed workload. To address
the shortcomings of file systems, we introduce the {\em
publish consistency\/} model well suited to the
Web-serving workload and implement it in the {\em
producer-consumer\/} locking protocol. A comparison of
this protocol against other file system protocols by
simulation shows that producer-consumer locking removes
almost all latency due to protocol overhead and
significantly reduces network load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vasiliou:2000:PDQ,
author = "Nikolaos Vasiliou and Hanan Lutfiyya",
title = "Providing a differentiated quality of service in a
{World Wide Web} server",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "22--28",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a strategy of extending a Web
server to be able to differentiate between requests in
different classes. This is required because most Web
servers are unable to do this by themselves. We present
our strategy and its design along with some initial
performance results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhattacharjee:2000:BFB,
author = "Samrat Bhattacharjee and William C. Cheng and Cheng-Fu
Chou and Leana Golubchik and Samir Khuller",
title = "{Bistro}: a framework for building scalable wide-area
{\em {Upload\/}} applications",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "29--35",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Hot spots are a major obstacle to achieving
scalability in the Internet. At the application layer,
hot spots are usually caused by either (a) high demand
for some data or (b) high demand for a certain service.
This high demand for data or services is typically the
result of a {\em real-life event\/} involving
availability of new data or approaching deadlines;
therefore, relief of these hot spots may improve
quality of life. At the application layer, hot spot
problems have traditionally been dealt with using some
combination of (1) increasing capacity; (2) spreading
the load over time, space, or both; and (3) changing
the workload. We note that the classes of solutions
stated above have been studied mostly in the context of
applications using the following types of communication:
(a) one-to-many, (b) many-to-many, and (c) one-to-one.
However, to the best of our knowledge there is no
existing work on making applications using {\em
many-to-one\/} communication scalable and efficient
(existing solutions, such as web based submissions,
simply use many independent one-to-one transfers). This
corresponds to an important class of applications,
whose examples include the various {\em upload\/}
applications such as submission of income tax forms,
conference paper submission, proposal submission
through the NSF FastLane system, homework and project
submissions in distance education, voting in digital
democracy applications, voting in interactive
television, and many more. Consequently, the main focus
of this paper is {\em scalable infrastructure design
for relief of hot spots in wide-area upload
applications}. The main contributions of this paper are
as follows. We state (a) a new problem, specifically,
the many-to-one communication, or {\em upload}, problem
as well as (b) the (currently) fundamental obstacles to
building scalable wide-area upload applications. We
also propose a general framework, which we term the
{\em Bistro\/} system, for a class of solutions to the
upload problem. In addition, we suggest a number of
open research problems, within this framework,
throughout the paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kraemer:2000:MIO,
author = "E. Kraemer and G. Paix{\~a}o and D. Guedes and W.
{Meira, Jr.} and V. Almeida",
title = "Minimizing the impact of orphan requests in e-commerce
services",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "36--42",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The most common problem of an overloaded
electronic-commerce server is an increase in the
response time perceived by customers, who may restart
their requests hoping to get a faster response, or
simply abort them, giving up on the store. Both
behaviors generate `orphan' requests: although they
were received by the server, they should not be
answered because their requestors have already
abandoned them. Orphan requests waste system resources,
since the server becomes aware of their cancellation
only when it tries to send a response and finds out
that the connection was closed. In this paper we
propose a new kernel service, the Connection Sentry,
which keeps track of requests being performed and
notifies processes about an eventual cancellation. Once
notified, a process can interrupt the execution of the
request, saving system resources and bandwidth. We
evaluated the gains by using our proposal in a virtual
bookstore, where we observed that the Connection Sentry
reduced service latency by up to 31\% and increased the
throughput by 27\% in overloaded servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Davison:2000:PPI,
author = "Brian D. Davison and Vincenzo Liberatore",
title = "Pushing politely: improving {Web} responsiveness one
packet at a time",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "43--43",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The rapid growth of traffic on the World-Wide Web
results in heavier loads on networks and servers and in
increased latency experienced while retrieving web
documents. This paper presents a framework that
exploits idle periods to satisfy future HTTP requests
speculatively and opportunistically. Our proposal
differs from previous schemes in that speculative
dissemination always gives precedence to on-demand
traffic, uses ranged requests for improved performance,
and can be implemented over a connectionless transport.
The protocol uses bounded and little server state even
as the workload increases, and it is resistant to
erroneous estimates of available bandwidth. Substantial
latency improvements are reported over pure on-demand
strategies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2000:CWU,
author = "Martin Arlitt",
title = "Characterizing {Web} user sessions",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "50--63",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362920",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a detailed characterization of
user sessions to the 1998 World Cup Web site. This
study analyzes data that was collected from the World
Cup site over a three month period. During this time
the site received 1.35 billion requests from 2.8
million distinct clients. This study focuses on
numerous user session characteristics, including
distributions for the number of requests per session,
number of pages requested per session, session length
and inter-session times. This paper concludes with a
discussion of how these characteristics can be utilized
in improving Web server performance in terms of the
end-user experience.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hadharan:2000:EEP,
author = "R. Hadharan and W. K. Ehrlich and D. Cura and P. K.
Reeser",
title = "End to End Performance Modeling of {Web} Server
Architectures",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "57--63",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.581258",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Web server performance in a distributed
Object-Oriented (OO) environment is a complex interplay
between a variety of factors (e.g., hardware platform,
threading model, object scope model, server operating
system, network bandwidth, disk file size, caching). In
this paper, we present a model-based approach to Web
Server performance evaluation in terms of an end-to-end
queueing model implemented in a simulation tool. We
have applied this model to Active Server Page (ASP) and
Common Object Model (COM) technology in Microsoft's
Internet Information Server and to the Java Server Page
(JSP) and JavaBean technology in both IIS and Netscape
Enterprise Server (NES). Our results indicate that for
the ASP Script Engine, performance predictions from the
simulation model matched the performance observed in a
test environment. However, for the JSP Script Engine,
the model predicted higher throughput than laboratory
test results at high load. This result suggests that
Web Server performance can be severely limited by a
software bottleneck that causes requests to be
serialized. This may cause a request to wait for some
resource (i.e., a lock) as opposed to consuming CPU or
memory. Implications of these results for Web Server
performance in general are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2000:AAS,
author = "Weiping Zhu",
title = "An approximate analysis of the shortest queue policy
on soft real-time scheduling",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "3--10",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377618",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The {\em join the shortest queue\/} (JSQ) policy is
studied in the context of soft real-time scheduling. An
approximate analysis of the JSQ is developed and
presented in this paper. The result obtained from the
approximate analysis is compared against simulation
results, which show that the approximate analysis is highly
accurate. Thus, the approximate analysis can be applied
to the development of soft real-time systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2000:SIP,
author = "Bo Li and Kazem Sohraby",
title = "Special Issue on Performance Issues in Mobile
Computing",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "11--11",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.581259",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2000:PWC,
author = "Ming Feng Chang and Yi-Bing Lin",
title = "Performance of a weakly consistent wireless {Web}
access mechanism",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "12--20",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377619",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In wireless web information access, long response may
be experienced. To reduce the response times of
wireless data access in a mobile network, caches are
utilized in the wireless handheld devices or wireless
proxy server. This paper proposes a wireless web data
access algorithm for WAP (wireless application
protocol) caching proxy to speed up data access. Our
algorithm utilizes the access frequency to tune the
data expiration time. The performance of the algorithm
is investigated and is compared with existing TTL-based
algorithms. Our study indicates that good performance
is expected for the new algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Toh:2000:EAH,
author = "C.-K. Toh and Richard Chen and Minar Delwar and Donald
Allen",
title = "Experimenting with an {Ad Hoc} wireless network on
campus: insights and experiences",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "21--29",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377622",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ad hoc wireless networks are new communication
networks that can be dynamically formed and deformed
on-the-fly, anytime and anywhere. User data is routed
with the help of an ad hoc mobile routing protocol.
Before the deployment of ad hoc mobile services, the
communication performance of such networks has to be
evaluated to demonstrate the practical limits based
on today's hardware and innovative communication
software. This paper describes the realization of an ad
hoc wireless testbed and the various experimental field
trials performed on campus. In particular, throughput,
end-to-end delay, route discovery time, and the impact
of varying source packet size and beaconing intervals
are examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lang:2000:PED,
author = "Tanja Lang and Daniel Floreani",
title = "Performance evaluation of different {TCP} error
detection and congestion control strategies over a
wireless link",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "30--38",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377623",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an evaluation of the two
major parts of TCP that impact its performance in
wireless environments, namely error detection and
congestion control. We have re-implemented the most
commonly used TCP error detection and congestion
control strategies using a modular design technique.
Using this implementation we have evaluated the
performance in terms of throughput and underlying
network usage of different combinations of these
strategies over a lossy link with high propagation
delay. Our results have shown that selective
acknowledgments work well together with any congestion
control mechanism and that some combinations of error
detection and congestion control suffer from a high
amount of unnecessary retransmissions. Consequently, we
propose a solution to this problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chitre:2000:IBS,
author = "Vikrant A. Chitre and John N. Daigle",
title = "{IP}-based services over {GPRS}",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "39--47",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377624",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The utility of mobile computing in the future will be
determined to a large degree by the quality of service
achievable over cellular based systems. In this paper,
we examine the traffic-handling capabilities of General
Packet Radio Service (GPRS) with respect to supporting
IP-based Internet services. We begin with an overview
of GPRS. We then present an analytical model to assess
throughput of the reverse link as a function of the
number of users connected and the distribution of user
message lengths for a scenario in which users are
continuously backlogged. Next, we investigate the
capability of GPRS to support World Wide Web access
using a modified version of the analytical model.
Specifically, we present a realistic scenario for user
sessions operating under the Hypertext Transfer
Protocol (HTTP), and we assess the transaction-handling
capabilities as a function of the number of user
sessions, taking into account network delays, forward
link transmission, random access delay, and other
factors. We also consider a scenario where both
continuously backlogged users and users operating HTTP
sessions are present. We conclude with a discussion of
some open issues in the design of GPRS based Internet
access.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cellular data service; IP over wireless; performance;
queues with contention",
}
@Article{Squillante:2001:SIWa,
author = "Mark S. Squillante",
title = "Special issue on the {Workshop on MAthematical
performance Modeling and Analysis (MAMA 2000)}",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "2--2",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2001:JPU,
author = "Mor Harchol-Balter",
title = "Job placement with unknown duration and no
preemption",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "3--5",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544399",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:2001:OPT,
author = "Leana Golubchik and John C. S. Lui",
title = "Open problems for threshold-based systems",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "6--8",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544400",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:2001:TPS,
author = "E. G. {Coffman, Jr.} and Predrag Jelenkovi{\'c}",
title = "Threshold policies for single-resource reservation
systems",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "9--10",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Requests for a resource arrive at rate $ \lambda $,
each request specifying a future time interval, called
a {\em reservation interval}, to be booked for its use
of the resource. The {\em advance notices\/} (delays
before reservation intervals are to begin) are
independent and drawn from a distribution $ A(z) $. The
durations of reservation intervals are sampled from the
distribution $ B(z) $ and are independent of each other
and the advance notices. We let $A$ and $B$ denote
random variables with the distributions $ A(z)$ and $
B(z)$ (the functional notation will always allow one to
distinguish between our two uses of the symbols $A$ and
$B$).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wolf:2001:LBC,
author = "Joel L. Wolf and Philip S. Yu",
title = "Load balancing for clustered {Web} farms",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "11--13",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544402",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a scheme which attempts to optimally
balance the load on the servers of a clustered web
farm. Solving this performance problem is crucial to
achieving minimal average response time for customer
requests, and thus ultimately to achieving maximal
customer throughput. This short paper gives an overview
of three new mathematical contributions. First, we
describe a {\em goal setting\/} algorithm to determine
the load on each server which minimizes the average
customer request response time given the possibly
overlapping cluster assignments of sites to servers and
the current customer request load for each site. The
cluster assignments, which of necessity can only be
changed relatively infrequently, have a major effect on
the optimal response time in the goal setting
component. So, second, we describe a {\em static\/}
algorithm which determines good assignments of sites to
servers. Third, and finally, we describe a {\em
dynamic\/} algorithm which handles the real-time server
load balancing, reacting to the fluctuating customer
request load in order to come as close as possible to
achieving the idealized optimal average response time.
We examine the performance of the overall load
balancing scheme via simulation experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouzaeSilva:2001:TAA,
author = "Edmundo {de Souza e Silva} and Rosa M. M. Le{\~a}o and
Morganna C. Diniz",
title = "Transient analysis applied to traffic modeling",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "14--16",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544403",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic modeling has been an extensive area of
research in the last few years, and a lot of modeling
effort has been devoted to better understanding the issues
involved in multiplexing traffic over high speed links.
The goals of the performance analyst include the
development of accurate traffic models to predict, with
sufficient accuracy, the impact of the traffic
generated by applications over the network resources,
and the evaluation of the quality of service (QoS)
being achieved. Performance studies include determining
buffer behavior, evaluating cell loss probability,
analyzing admission control algorithms, and many others. One
performance study issue is the calculation of {\em
descriptors\/} from different traffic models. In the
literature, one can find a large number of models that
have been proposed, including Markov and non-Markovian
models [1]. Although not possessing the long-range
dependence property, Markov models are still attractive
not only due to their mathematical tractability but
also because it has been shown that long-range
correlations can be approximately obtained from certain
kinds of Markovian models (e.g. [11]). Furthermore,
works such as [8] show that Markov models can be used
to accurately predict performance metrics. Once a set
of traffic models is chosen, the modeler should obtain
the desired performance measures. Ideally, the
measures should be calculated analytically using
efficient algorithms. The modeling steps briefly
outlined above may require the transient analysis of
general Markovian models, including Markov reward
models. One of the goals of this work is to present new
algorithms we developed to efficiently obtain measures
such as the transient queue length distribution (and
from that, the packet loss ratio as a function of time)
directly from the model of the source feeding the
queue. We also obtain second order descriptors such as
the index of dispersion and the autocovariance from the
source models. Using these algorithms the modeler can
evaluate the efficacy of different Markovian models to
predict performance metrics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bu:2001:FPAa,
author = "T. Bu and D. Towsley",
title = "A fixed point approximation of {TCP} behavior in a
network",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "17--18",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544404",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2001:LDA,
author = "Cheng-Shang Chang and Yuh-ming Chiu and Wheyming Tina
Song",
title = "Large deviation analysis for multiplexing independent
regulated inputs",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "19--21",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the performance analysis
problem for a work conserving link with a large number
of independent regulated inputs. For such a problem, we
derive simple stochastic bounds under a general traffic
constraint for the inputs. The bound for queue length
is shown to be a stochastic extension of the
deterministic worst case bound and it is asymptotically
tighter than the bound in Kesidis and Konstantopoulos
[5]. We also test the bound by considering periodic
inputs with independent starting phases. Based on
importance sampling, we propose a fast simulation
algorithm that achieves significant variance reduction.
The simulation results are compared with our
stochastic bound and the bound in [5].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kuang:2001:CSA,
author = "Lei Kuang and Armand M. Makowski",
title = "Convex stability and asymptotic convex ordering for
non-stationary arrival processes",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "22--23",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544406",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The notion of convex stability for a sequence of
non-negative random variables is discussed in the
context of several applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2001:RRM,
author = "Eitan Bachmat",
title = "Recent results in mathematical modeling and
performance evaluation of disks and disk array",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "24--26",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544407",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the seventies and eighties extensive work on
mathematical modeling and performance evaluation of
disks and disk arrays was carried out. The main tools
were stochastic and combinatorial analysis. For the
combinatorial approach led by C. K. Wong and his
collaborators, the reader is urged to consult [11]. For
the stochastic approach led by E. G. Coffman and his
collaborators one should consult [3]. Both references
provide rather extensive bibliographies. In the late
eighties and the nineties with the coming of the RAID
`revolution', most of the work in the area has become
rather heuristic in nature, see [5] for a survey, with
a few notable exceptions. In this abstract we would
like to report on two recent results which relate
performance and modeling issues in disks and disk
arrays to the theory of metric spaces and the theory of
graph evolution and phase transition. We hope this will
revive the spirit of the work done in the seventies and
eighties (in other walks of life this may not be
advisable). The results are taken from [2] and [4].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hogstedt:2001:GCA,
author = "Karin Hogstedt and Doug Kimelman and V. T. Rajan and
Tova Roth and Mark Wegman",
title = "Graph cutting algorithms for distributed applications
partitioning",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "27--29",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of optimally allocating the components of
a distributed program over several machines can be
shown to reduce to a multi-terminal graph cutting
problem. In the case of three or more terminals, this
problem has been shown to be NP-hard. This paper
introduces a number of heuristic graph algorithms for
use in partitioning distributed object applications ---
that is, for deciding which objects should be placed on
which machines in order to minimize communication and
achieve best overall performance of the application.
These heuristics are particularly effective for graphs
with characteristics specific to representative
distributed object applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fernandes:2001:TSL,
author = "Paulo Fernandes and Brigitte Plateau",
title = "Triangular solution of linear systems in tensor
product format",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "30--32",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544409",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an algorithm to solve linear
systems expressed by a matrix stored in a tensor
product format. The proposed solution is based on an LU
decomposition of the matrix that keeps the tensor product
structure. It is shown that the complexity of the
decomposition is negligible and the backward and
forward substitutions are no more complex than two
standard vector-matrix multiplications. Finally,
applications of the proposed algorithm and the
comparison with other similar techniques are
discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Capra:2001:UPS,
author = "L. Capra and C. Dutheillet and G. Franceschinis and J.
M. Ili{\'e}",
title = "On the use of partial symmetries for lumping {Markov}
chains",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "33--35",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544410",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper a method is proposed, to exploit
partially symmetric behavior of systems for efficient
performance evaluation. The method works on performance
models described with the Stochastic Well-Formed Nets
(SWN) formalism: it allows one to automatically discover
partial symmetries in the model behavior, and directly
derive a lumped Markov chain from it, suitable for
performance analysis purposes. With respect to previous
works on automatic exploitation of symmetries in SWNs,
the proposed approach allows a significantly higher
reduction of the state space size in many practical
cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haas:2001:EDN,
author = "Peter J. Haas",
title = "Estimation of delays in non-regenerative
discrete-event stochastic systems",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "36--38",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544411",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2001:DSC,
author = "David Gamarnik",
title = "On deciding stability of constrained random walks and
queueing systems",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "39--40",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544412",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider in this paper two types of queueing
systems which operate under a specific and fixed
scheduling policy. The first system consists of a
single server and several buffers in which arriving
jobs are stored. We assume that arriving parts may
require several stages of processing in which case each
stage corresponds to a different buffer. The second
system is a communication type queueing network given
by a graph. The arriving jobs (packets) request a
simple path along which they need to be processed. In
both models the jobs arrive in a completely
deterministic fashion: the interarrival times are fixed
and known. All the processing times are also
deterministic. A scheduling policy specifies a rule
using which arriving parts are processed in the
queueing system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2001:AQU,
author = "Mark S. Squillante and Baffelly Woo and Li Zhang",
title = "Analysis of queues under correlated arrivals with
applications to {Web} server performance",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "41--43",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544413",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of many high-volume Web sites often
makes it difficult to mathematically analyze various
performance measures. Since these complex behaviors can
have a significant impact on performance, it is
important to capture them in sufficient detail in the
analysis of the corresponding queueing systems. We
consider the access logs from a particular class of
high-volume Web sites serving dynamic content to obtain
a better understanding of the complexities of user
request patterns in such environments. Our analysis
demonstrates that these arrival patterns exhibit strong
dependence structures which can be accurately
represented by an arrival process with strong
(short-range) correlations, at least for the class of
Web sites motivating our study [2]. Based on these
results, we develop a methodology for approximating
this class of dependent arrival processes by a set of
phase-type distributions. Our approach consists of
formulating and solving a nonlinear optimization
problem that fits a set of dependent stochastic models
to approximate the interarrival time patterns from the
data, which includes matching the autocorrelation
function. To evaluate the effectiveness of our
approach, we conduct a large number of statistical
tests and experiments showing that our methodology
provides an excellent match between the real user
request data and the fitted approximate arrival
process. Given this dependent arrival process as input,
we then derive an exact matrix-analytic analysis of a
general multi-server queue under two server queueing
disciplines. This analysis yields results that provide
significant reductions in the numerical computation
required to solve the queueing models. To demonstrate
the accuracy of the performance measures obtained under
these methods, a large number of experiments were
performed and detailed comparisons were made between
the sojourn time measures from our analysis and the
corresponding measures obtained from simulation of the
queueing system under the actual user request data.
These results show both sets of performance measures to
be in excellent agreement, with relative errors
consistently less than 5\%, and further demonstrate the
robustness of our approach. We also conduct a set of
numerical experiments that exploit our matrix-analytic
analysis and its computational efficiency, which are
then used to establish some important results for
multi-server queues under dependent arrival processes.
This includes the notion of effective stability where
the point at which the mean sojourn time of the queue
exceeds a large constant (e.g., 1000) multiplied by the
mean service time occurs well before the theoretical
stability condition for the queue. Due to space
limitations, we simply summarize a subset of our
results in this extended abstract. We refer the
interested reader to [1] for additional details,
references and results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Narlikar:2001:PMF,
author = "Girija Narlikar and Francis Zane",
title = "Performance modeling for fast {IP} lookups",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "1--12",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378423",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine algorithms and data
structures for the longest prefix match operation
required for routing IP packets. Previous work, aimed
at hardware implementations, has focused on quantifying
worst case lookup time and memory usage. With the
advent of fast programmable platforms, whether network
processor or PC-based, metrics which look instead at
average case behavior and memory cache performance
become more important. To address this, we consider a
family of data structures capturing the important
techniques used in known fast IP lookup schemes. For
these data structures, we construct a model which,
given an input trace, estimates cache miss rates and
predicts average case lookup performance. This model is
validated using traces with varying characteristics.
Using the model, we then choose the best data structure
from this family for particular hardware platforms and
input traces; we find that the optimal data structure
differs in different settings. The model can also be
used to select the appropriate hardware configurations
for future lookup engines. The lookup performance of
the selected data structures is competitive with the
fastest available software implementations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qie:2001:SCS,
author = "Xiaohu Qie and Andy Bavier and Larry Peterson and
Scott Karlin",
title = "Scheduling computations on a software-based router",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "13--24",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378425",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent efforts to add new services to the Internet
have increased the interest in software-based routers
that are easy to extend and evolve. This paper
describes our experiences implementing a software-based
router, with a particular focus on the main difficulty
we encountered: how to schedule the router's CPU
cycles. The scheduling decision is complicated by the
desire to differentiate the level of service for
different packet flows, which leads to two fundamental
conflicts: (1) assigning processor shares in a way that
keeps the processes along the forwarding path in
balance while meeting QoS promises, and (2) adjusting
the level of batching in a way that minimizes overhead
while meeting QoS promises.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Su:2001:DMP,
author = "Xun Su and Gustavo de Veciana",
title = "Dynamic multi-path routing: asymptotic approximation
and simulations",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "25--36",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378426",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study the dynamic multi-path routing
problem. We focus on an operating regime where traffic
flows arrive at and depart from the network in a bursty
fashion, and where the delays involved in link state
advertisement may lead to `synchronization' effects
that adversely impact the performance of dynamic
single-path routing schemes. We start by analyzing a
simple network of parallel links, where the goal is to
minimize the average increase in network congestion on
the time scale of link state advertisements. We
consider an asymptotic regime leading to an
optimization problem permitting closed-form analysis of
the number of links over which dynamic multi-path
routing should be conducted. Based on our analytical
result we examine three types of dynamic routing
schemes, and identify a robust policy, {\em i.e.},
routing the traffic to a set of links with loads within
a factor of the least loaded, that exhibits robust
performance. We then propose a similar policy for mesh
networks and show by simulation some of its desirable
properties. The main results suggest that our proposal
would provide significant performance improvement for
high speed networks carrying bursty traffic flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jones:2001:PRS,
author = "Michael B. Jones and Stefan Saroiu",
title = "Predictability requirements of a soft modem",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "37--49",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378427",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "{\em Soft Modems\/} use the main processor to execute
modem functions traditionally performed by hardware on
the modem card. To function correctly, soft modems
require that ongoing signal processing computations be
performed on the host CPU in a timely manner. Thus,
signal processing is a commonly occurring background
real-time application---one running on systems that
were not designed to support predictable real-time
execution. This paper presents a detailed study of the
performance characteristics and resource requirements
of a popular soft modem. Understanding these
requirements should inform the efforts of those
designing and building operating systems needing to
support soft modems. Furthermore, we believe that the
conclusions of this study also apply to other existing
and upcoming soft devices, such as soft Digital
Subscriber Line (DSL) cards. We conclude that (1)
signal processing in an interrupt handler is not only
unnecessary but also detrimental to the predictability
of other computations in the system and (2) a real-time
scheduler can provide predictability for the soft modem
while minimizing its impact on other computations in
the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CPU scheduling; open real-time system; real-time;
Rialto; Rialto/NT; signal processing; soft devices;
soft modem; Windows 2000; Windows NT",
}
@Article{Lorch:2001:IDV,
author = "Jacob R. Lorch and Alan Jay Smith",
title = "Improving dynamic voltage scaling algorithms with
{PACE}",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "50--61",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378429",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses algorithms for dynamically
varying (scaling) CPU speed and voltage in order to
save energy. Such scaling is useful and effective when
it is immaterial when a task completes, as long as it
meets some deadline. We show how to modify any scaling
algorithm to keep performance the same but minimize
expected energy consumption. We refer to our approach
as PACE (Processor Acceleration to Conserve Energy)
since the resulting schedule increases speed as the
task progresses. Since PACE depends on the probability
distribution of the task's work requirement, we present
methods for estimating this distribution and evaluate
these methods on a variety of real workloads. We also
show how to approximate the optimal schedule with one
that changes speed a limited number of times. Using
PACE causes very little additional overhead, and yields
substantial reductions in CPU energy consumption.
Simulations using real workloads show it reduces the
CPU energy consumption of previously published
algorithms by up to 49.5\%, with an average of 20.6\%,
without any effect on performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vaidyanathan:2001:AIS,
author = "Kalyanaraman Vaidyanathan and Richard E. Harper and
Steven W. Hunter and Kishor S. Trivedi",
title = "Analysis and implementation of software rejuvenation
in cluster systems",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "62--71",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378434",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several recent studies have reported the phenomenon of
`software aging', one in which the state of a software
system degrades with time. This may eventually lead to
performance degradation of the software or crash/hang
failure or both. `Software rejuvenation' is a
pro-active technique aimed to prevent unexpected or
unplanned outages due to aging. The basic idea is to
stop the running software, clean its internal state and
restart it. In this paper, we discuss software
rejuvenation as applied to cluster systems. This is
both an innovative and an efficient way to improve
cluster system availability and productivity. Using
Stochastic Reward Nets (SRNs), we model and analyze
cluster systems which employ software rejuvenation. For
our proposed time-based rejuvenation policy, we
determine the optimal rejuvenation interval based on
system availability and cost. We also introduce a new
rejuvenation policy based on prediction and show that
it can dramatically increase system availability and
reduce downtime cost. These models are very general and
can capture a multitude of cluster system
characteristics, failure behavior and performability
measures, which we are just beginning to explore. We
then briefly describe an implementation of a software
rejuvenation system that performs periodic and
predictive rejuvenation, and show some empirical data
from systems that exhibit aging.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Loh:2001:TSA,
author = "Gabriel Loh",
title = "A time-stamping algorithm for efficient performance
estimation of superscalar processors",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "72--81",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378437",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increasing complexity of modern superscalar
microprocessors makes the evaluation of new designs and
techniques much more difficult. Fast and accurate
methods for simulating program execution on realistic
and hypothetical processor models are of great interest
to many computer architects and compiler writers. There
are many existing techniques, from profile based
runtime estimation to complete cycle-level simulations.
Many researchers choose to sacrifice the speed of
profiling for the accuracy obtainable by cycle-level
simulators. This paper presents a technique that
provides accurate performance predictions, while
avoiding the complexity associated with a complete
processor emulator. The approach augments a fast
in-order simulator with a time-stamping algorithm that
provides a very good estimate of program running time.
This algorithm achieves an average accuracy that is
within 7.5\% of a cycle-level out-of-order simulator in
approximately 41\% of the running time on the eight
SPECInt95 integer benchmarks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bonald:2001:IFI,
author = "Thomas Bonald and Laurent Massouli{\'e}",
title = "Impact of fairness on {Internet} performance",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "82--91",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378438",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We discuss the relevance of fairness as a design
objective for congestion control mechanisms in the
Internet. Specifically, we consider a backbone network
shared by a dynamic number of short-lived flows, and
study the impact of bandwidth sharing on network
performance. In particular, we prove that for a broad
class of fair bandwidth allocations, the total number
of flows in progress remains finite if the load of
every link is less than one. We also show that provided
the bandwidth allocation is `sufficiently' fair,
performance is optimal in the sense that the throughput
of the flows is mainly determined by their access rate.
Neither property is guaranteed with unfair bandwidth
allocations, when priority is given to one class of
flow with respect to another. This suggests current
proposals for a differentiated services Internet may
lead to suboptimal utilization of network resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salamatian:2001:HMM,
author = "Kav{\'e} Salamatian and Sandrine Vaton",
title = "Hidden {Markov} modeling for network communication
channels",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "92--101",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378439",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we perform the statistical analysis of
an Internet communication channel. Our study is based
on a Hidden Markov Model (HMM). The channel switches
between different states; to each state corresponds the
probability that a packet sent by the transmitter will
be lost. The transition between the different states of
the channel is governed by a Markov chain; this Markov
chain is not observed directly, but the received packet
flow provides some probabilistic information about the
current state of the channel, as well as some
information about the parameters of the model. In this
paper we detail some useful algorithms for the
estimation of the channel parameters, and for making
inference about the state of the channel. We discuss
the relevance of the Markov model of the channel; we
also discuss how many states are required to
pertinently model a real communication channel.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active measurement; expectation-maximization; hidden
Markov model; Internet modelling; network state
estimation",
}
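
To make the state-inference step above concrete, here is a minimal forward
filter for a two-state (Gilbert-style) hidden Markov loss channel in Python.
The transition matrix, per-state loss probabilities, initial distribution,
and the observed loss sequence are invented for illustration; the paper's
parameter-estimation algorithms over real traces are not reproduced here.

# Minimal forward filter for a 2-state hidden Markov loss channel
# (illustrative parameters; not the estimators from the paper).

A = [[0.95, 0.05],        # assumed state transition matrix P(s_{t+1} | s_t)
     [0.20, 0.80]]
LOSS_PROB = [0.01, 0.30]  # assumed per-state packet loss probabilities
PI = [0.8, 0.2]           # assumed initial state distribution

def forward_filter(obs):
    """obs: sequence of 0 (packet received) / 1 (packet lost).
    Returns P(state | observations so far) after each packet."""
    alpha = PI[:]
    history = []
    for o in obs:
        # emission probability of the observation in each state
        emit = [LOSS_PROB[s] if o == 1 else 1.0 - LOSS_PROB[s] for s in (0, 1)]
        # predict (one Markov step), then update with the observation
        pred = [sum(alpha[i] * A[i][j] for i in (0, 1)) for j in (0, 1)]
        post = [pred[j] * emit[j] for j in (0, 1)]
        norm = sum(post)
        alpha = [p / norm for p in post]
        history.append(alpha[:])
    return history

if __name__ == "__main__":
    observations = [0, 0, 1, 1, 1, 0, 0, 0, 1, 0]
    for t, dist in enumerate(forward_filter(observations)):
        print(f"after packet {t}: P(good)={dist[0]:.3f} P(bad)={dist[1]:.3f}")
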
@Article{Cao:2001:NIT,
author = "Jin Cao and William S. Cleveland and Dong Lin and Don
X. Sun",
title = "On the nonstationarity of {Internet} traffic",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "102--112",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378440",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic variables on an uncongested Internet wire
exhibit a pervasive nonstationarity. As the rate of new
TCP connections increases, arrival processes (packet
and connection) tend locally toward Poisson, and time
series variables (packet sizes, transferred file sizes,
and connection round-trip times) tend locally toward
independent. The cause of the nonstationarity is
superposition: the intermingling of sequences of
connections between different source-destination pairs,
and the intermingling of sequences of packets from
different connections. We show this empirically by
extensive study of packet traces for nine links coming
from four packet header databases. We show it
theoretically by invoking the mathematical theory of
point processes and time series. If the connection rate
on a link gets sufficiently high, the variables can be
quite close to Poisson and independent; if major
congestion occurs on the wire before the rate gets
sufficiently high, then the progression toward Poisson
and independent can be arrested for some variables.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hsieh:2001:PCC,
author = "Hung-Yun Hsieh and Raghupathy Sivakumar",
title = "Performance comparison of cellular and multi-hop
wireless networks: a quantitative study",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "113--122",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378441",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study the performance trade-offs
between conventional cellular and multi-hop ad-hoc
wireless networks. We compare through simulations the
performance of the two network models in terms of raw
network capacity, end-to-end throughput, end-to-end
delay, power consumption, per-node fairness (for
throughput, delay, and power), and impact of mobility
on the network performance. The simulation results show
that while ad-hoc networks perform better in terms of
throughput, delay, and power, they suffer from
unfairness and poor network performance in the event of
mobility. We discuss the trade-offs involved in the
performance of the two network models, identify the
specific reasons behind them, and argue that the
trade-offs preclude the adoption of either network
model as a clear solution for future wireless
communication systems. Finally, we present a simple
hybrid wireless network model that has the combined
advantages of cellular and ad-hoc wireless networks but
does not suffer from the disadvantages of either.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hegde:2001:BLM,
author = "Nidhi Hegde and Khosrow Sohraby",
title = "Blocking in large mobile cellular networks with bursty
traffic",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "123--132",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378442",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider large cellular networks. The traffic
entering the network is assumed to be correlated in
both {\em space\/} and {\em time.\/} The space
dependency captures the possible correlation between
the arrivals to different nodes in the network, while
the time dependency captures the time correlation
between arrivals to each node. We model such traffic
with a Markov-Modulated Poisson Process (MMPP). It is
shown that even in the single node environment, the
problem is not mathematically tractable. A model with
an infinite number of circuits is used to approximate
the finite model. A novel recursive methodology is
introduced in finding the joint moments of the number
of busy circuits in different cells in the network
leading to accurate determination of blocking
probability. A simple mixed-Poisson distribution is
introduced as an accurate approximation of the
distribution of the number of busy circuits. We show
that for certain cases, in the system with an infinite
number of circuits in each cell, there is no effect of
mobility on the performance of the system. Our
numerical results indicate that the traffic burstiness
has a major impact on the system performance. The
mixed-Poisson approximation is found to be a very good
fit to the exact finite model. The performance of this
approximation using few moments is affected by traffic
burstiness and average load. We find that in a
reasonable range of traffic burstiness, the
mixed-Poisson distribution provides a close
approximation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
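
As a small companion to the entry above, the sketch below simulates a
two-state Markov-Modulated Poisson Process and reports the empirical mean and
index of dispersion of per-slot counts. The switching rates and per-state
intensities are arbitrary illustrative values, and nothing here reproduces the
paper's recursive moment computation or mixed-Poisson approximation.

# Simulate a 2-state MMPP and measure the burstiness of per-interval counts.
# Parameters are illustrative, not taken from the paper.
import random

SWITCH = [0.5, 1.5]     # assumed rate of leaving state 0 / state 1 (per sec)
LAMBDA = [2.0, 20.0]    # assumed Poisson arrival intensity in each state

def simulate_counts(n_slots, slot=1.0, seed=1):
    rng = random.Random(seed)
    state, t_switch = 0, rng.expovariate(SWITCH[0])
    counts = []
    for _ in range(n_slots):
        remaining, count = slot, 0
        while True:
            t_arr = rng.expovariate(LAMBDA[state])   # memoryless, so resampling is valid
            nxt = min(t_arr, t_switch, remaining)
            if nxt == remaining:                     # slot ends
                t_switch -= remaining
                break
            if nxt == t_switch:                      # environment switches state
                remaining -= t_switch
                state = 1 - state
                t_switch = rng.expovariate(SWITCH[state])
            else:                                    # an arrival occurs
                remaining -= t_arr
                t_switch -= t_arr
                count += 1
        counts.append(count)
    return counts

if __name__ == "__main__":
    c = simulate_counts(20000)
    mean = sum(c) / len(c)
    var = sum((x - mean) ** 2 for x in c) / len(c)
    print(f"mean={mean:.2f}  index of dispersion={var / mean:.2f}  (Poisson would be 1)")
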
@Article{Kumar:2001:CEF,
author = "Apurva Kumar and Rajeev Gupta",
title = "Capacity evaluation of frequency hopping based ad-hoc
systems",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "133--142",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378443",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The IEEE 802.15 Wireless Personal Area Networks (WPAN)
study group has been working on evolving a standard for
short-range wireless connectivity between low
complexity and low power devices operating within the
personal operating space (POS). The scenarios
envisioned for WPANs are likely to involve a large
number of POSs operating in an indoor environment.
Among short-range wireless technologies, Bluetooth$^{TM}$
based ad-hoc connectivity comes closest to
satisfying the WPAN requirements. Bluetooth provides a
gross rate of 1 Mbps per network and allows several
such networks to overlap using frequency hopping. The
`aggregate throughput' thus achieved is much higher
than 1 Mbps. In the absence of external interfering
sources, aggregate throughput is limited by self
interference which depends upon, (i) physical layer
parameters like hopping rate, hopping sequences,
transmitted power, receiver sensitivity, modulation,
forward error correction (ii) channel characteristics
like coherence bandwidth and coherence time (iii)
spatial characteristics. In this work we consider the
problem of finding the capacity of Bluetooth based
ad-hoc systems by accurately modeling the Bluetooth
physical layer and the indoor wireless channel. We
predict the throughput in Bluetooth based ad-hoc
systems as a function of a generalized set of
parameters using realistic scenarios and assumptions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ad-hoc networks; bit error rate; Bluetooth technology;
capacity; forward error correction; frequency hopping;
GFSK; throughput",
}
@Article{Qiu:2001:NPF,
author = "Dongyu Qiu and Ness B. Shroff",
title = "A new predictive flow control scheme for efficient
network utilization and {QoS}",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "143--153",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378777",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we develop a new predictive flow control
scheme and analyze its performance. This scheme
controls the non-real-time traffic based on predicting
the real-time traffic. The goal of the work is to
operate the network in a low congestion, high
throughput regime. We provide a rigorous analysis of
the performance of our flow control method and show
that the algorithm has attractive and useful
properties. From our analysis we obtain an explicit
condition that gives us design guidelines on how to
choose a predictor. We learn that it is especially
important to take the queueing effect into account in
developing the predictor. We also provide numerical
results comparing different predictors that use varying
degrees of information from the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Paschalidis:2001:MBE,
author = "Ioannis Ch. Paschalidis and Spyridon Vassilaras",
title = "Model-based estimation of buffer overflow
probabilities from measurements",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "154--163",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378778",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of estimating buffer overflow
probabilities when the statistics of the input traffic
are not known and have to be estimated from
measurements. We start by investigating the use of
Markov-modulated processes in modeling the input
traffic and propose a method for selecting an optimal
model based on Akaike's Information Criterion. We then
consider a queue fed by such a Markov-modulated input
process and use large deviations asymptotics to obtain
the buffer overflow probability. The expression for
this probability is affected by estimation errors in
the parameters of the input model. We analyze the
effect of these errors and propose a new, more robust,
estimator which is less likely to underestimate the
overflow probability than the estimator obtained by
certainty equivalence. As such, it is appropriate in
situations where the overflow probability is associated
with {\em Quality of Service (QoS)\/} and we need to
provide firm QoS guarantees. Nevertheless, as the
number of observations increases, the proposed
estimator converges with probability 1 to the
appropriate target, and thus, does not lead to resource
underutilization in this limit.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Akaike's information criterion; effective bandwidth;
estimation; large deviations; Markov-modulated
processes",
}
@Article{Dutta:2001:OTG,
author = "Rudra Dutta and George N. Rouskas",
title = "On optimal traffic grooming in {WDM} rings",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "164--174",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378779",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of designing a virtual
topology to minimize electronic routing, that is,
grooming traffic, in wavelength routed optical rings.
We present a new framework consisting of a sequence of
bounds, both upper and lower, in which each successive
bound is at least as strong as the previous one. The
successive bounds take larger amounts of computation to
evaluate, and the number of bounds to be evaluated for
a given problem instance is only limited by the
computational power available. The bounds are based on
decomposing the ring into sets of nodes arranged in a
path, and adopting the locally optimal topology within
each set. Our approach can be applied to many virtual
topology problems on rings. The upper bounds we obtain
also provide a useful series of heuristic solutions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{LeBoudec:2001:SPV,
author = "Jean-Yves {Le Boudec}",
title = "Some properties of variable length packet shapers",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "175--183",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378780",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The min-plus theory of greedy shapers has been
developed after Cruz's results on the calculus of
network delays. An example of greedy shaper is the
buffered leaky bucket controller. The theory of greedy
shapers establishes a number of properties; for
example, re-shaping keeps original arrival constraints.
The existing theory applies in all rigor either to
fluid systems, or to packets of constant size such as
ATM. For variable length packets, the distortion
introduced by packetization affects the theory, which
is no longer valid. Chang has introduced the concept of
packetizer, which models the effect of variable length
packets, and has also developed a max-plus theory of
shapers. In this paper, we start with the min-plus
theory, and obtain results on greedy shapers for
variable length packets which are not readily explained
with the max-plus theory of Chang. We show a
fundamental result, namely, the min-plus representation
of a packetized greedy shaper. This allows us to prove
that, under some assumptions, re-shaping a flow of
variable length packets does keep original arrival
constraints. However, we show on some examples that if
the assumptions are not satisfied, then the property
may not hold any more. We also demonstrate the
equivalence of implementing a buffered leaky bucket
controller based on either virtual finish times or on
bucket replenishment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "leaky bucket; min-plus algebra; network calculus;
shaper",
}
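
The following sketch illustrates the virtual-finish-time view of a buffered
leaky-bucket shaper for variable-length packets: each packet is released at
the earliest time at which a token bucket of rate r and depth b (in bytes)
holds enough tokens for the full packet. The packet sizes, rate, and burst
values are invented for the example; this is a generic token-bucket spacer,
not the min-plus constructions proved in the paper.

# Generic buffered leaky-bucket (token-bucket) shaper for variable-length
# packets, computing release times; parameters are illustrative only.

def shape(packets, rate, burst):
    """packets: list of (arrival_time, length_bytes) in arrival order.
    rate: token fill rate in bytes/s; burst: bucket depth in bytes.
    Returns release times so the output conforms to the (rate, burst) envelope."""
    releases = []
    tokens, last = burst, 0.0          # bucket starts full at time 0
    prev_release = 0.0
    for arrival, length in packets:
        # a packet cannot leave before it arrives or before its predecessor
        t = max(arrival, prev_release)
        # refill tokens for the elapsed time, capped at the bucket depth
        tokens = min(burst, tokens + (t - last) * rate)
        last = t
        if tokens < length:
            # wait until enough tokens have accumulated for the whole packet
            t += (length - tokens) / rate
            tokens = length
            last = t
        tokens -= length
        releases.append(t)
        prev_release = t
    return releases

if __name__ == "__main__":
    pkts = [(0.0, 1500), (0.0, 1500), (0.001, 500), (0.010, 1500)]
    for (a, l), r in zip(pkts, shape(pkts, rate=1_000_000, burst=2000)):
        print(f"arrival={a:.4f}s len={l:4d}B release={r:.6f}s")
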
@Article{Chang:2001:PMI,
author = "Cheng-Shang Chang and Yuh-ming Chiu and Wheyming Tina
Song",
title = "On the performance of multiplexing independent
regulated inputs",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "184--193",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378782",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the performance analysis
problem for a work conserving link with a large number
of independent regulated inputs. For such a problem, we
derive simple stochastic bounds under a general traffic
constraint for the inputs. The bound for queue length
is shown to be a stochastic extension of the
deterministic worst case bound and it is asymptotically
tighter than the bound in Kesidis and Konstantopoulos
[23]. We also test the bound by considering periodic
inputs with independent starting phases. Based on
Sanov's theorem and importance sampling, we propose a
fast simulation algorithm that achieves significant
variance reduction. The simulations results are
compared with our stochastic bound and the bound in
[23].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fast simulation; multiplexing; performance bounds",
}
@Article{Shuf:2001:CMB,
author = "Yefim Shuf and Mauricio J. Serrano and Manish Gupta
and Jaswinder Pal Singh",
title = "Characterizing the memory behavior of {Java}
workloads: a structured view and opportunities for
optimizations",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "194--205",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378783",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies the memory behavior of important
Java workloads used in benchmarking Java Virtual
Machines (JVMs), based on instrumentation of both
application and library code in a state-of-the-art JVM,
and provides structured information about these
workloads to help guide systems' design. We begin by
characterizing the inherent memory behavior of the
benchmarks, such as information on the breakup of heap
accesses among different categories and on the hotness
of references to fields and methods. We then provide
detailed information about misses in the data TLB and
caches, including the distribution of misses over
different kinds of accesses and over different methods.
In the process, we make interesting discoveries about
TLB behavior and limitations of data prefetching
schemes discussed in the literature in dealing with
pointer-intensive Java codes. Throughout this paper, we
develop a set of recommendations to computer architects
and compiler writers on how to optimize computer
systems and system software to run Java programs more
efficiently. This paper also makes the first attempt to
compare the characteristics of SPECjvm98 to those of a
server-oriented benchmark, pBOB, and explain why the
current set of SPECjvm98 benchmarks may not be adequate
for a comprehensive and objective evaluation of JVMs
and just-in-time (JIT) compilers. We discover that the
fraction of accesses to array elements is quite
significant, demonstrate that the number of `hot spots'
in the benchmarks is small, and show that field
reordering cannot yield significant performance gains.
We also show that even a fairly large L2 data cache is
not effective for many Java benchmarks. We observe that
instructions used to prefetch data into the L2 data
cache are often squashed because of high TLB miss rates
and because the TLB does not usually have the
translation information needed to prefetch the data
into the L2 data cache. We also find that co-allocation
of frequently used method tables can reduce the number
of TLB misses and lower the cost of accessing type
information block entries in virtual method calls and
runtime type checking.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sohoni:2001:SMS,
author = "Sohum Sohoni and Rui Min and Zhiyong Xu and Yiming
Hu",
title = "A study of memory system performance of multimedia
applications",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "206--215",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378784",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multimedia applications are fast becoming one of the
dominating workloads for modern computer systems. Since
these applications normally have large data sets and
little data-reuse, many researchers believe that they
have poor memory behavior compared to traditional
programs, and that current cache architectures cannot
handle them well. It is therefore important to
quantitatively characterize the memory behavior of
these applications in order to provide insights for
future design and research of memory systems. However,
very few results on this topic have been published.
This paper presents a comprehensive research on the
memory requirements of a group of programs that are
representative of multimedia applications. These
programs include a subset of the popular MediaBench
suite and several large multimedia programs running on
the Linux, Windows NT and Tru64 UNIX operating systems.
We performed extensive measurement and trace-driven
simulation experiments. We then compared the memory
utilization of these programs to that of SPECint95
applications. We found that multimedia applications
actually have better memory behavior than SPECint95
programs. The high cache hit rates of multimedia
applications can be attributed to the following three
factors. Most multimedia applications apply block
partitioning algorithms to the input data, and work on
small blocks of data that easily fit into the cache.
Secondly, within these blocks, there is significant
data reuse as well as spatial locality. The third
reason is that a large number of references generated
by multimedia applications are to their internal data
structures, which are relatively small and can also
easily fit into reasonably-sized caches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bu:2001:FPAb,
author = "Tian Bu and Don Towsley",
title = "Fixed point approximations for {TCP} behavior in an
{AQM} network",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "216--225",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378786",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we explore the use of fixed point
methods to evaluate the performance of a large
population of TCP flows traversing a network of routers
implementing active queue management (AQM) such as RED
(random early detection). Both AQM routers that drop
and that mark packets are considered along with
infinite and finite duration TCP flows. In the case of
finite duration flows, we restrict ourselves to
networks containing one congested router. In all cases,
we formulate a fixed point problem with the router
average queue lengths as unknowns. Once these are
obtained, other metrics such as router loss
probability, TCP flow throughput, TCP flow end-to-end
loss rates, average round trip time, and average
session duration are easily obtained. Comparison with
simulation for a variety of scenarios shows that the
model is accurate in its predictions (mean errors less
than 5\%). Last, we establish monotonicity properties
exhibited by the solution for a single congested router
that explains several interesting observations, such as
TCP SACK suffers higher loss than TCP Reno.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
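
To show the flavour of such a fixed point (not the paper's actual equations),
the sketch below couples the well-known square-root TCP throughput
approximation with a RED-style linear drop profile on a single bottleneck and
solves the resulting one-dimensional consistency equation by bisection. The
link speed, RTT, flow count, and RED thresholds are invented illustrative
values.

# Toy fixed point coupling the square-root TCP throughput formula with a
# RED-style drop profile on one congested link; all parameter values are
# illustrative assumptions, not taken from the paper.
import math

C = 1.25e6            # assumed link capacity, bytes/s (10 Mb/s)
N = 10                # assumed number of long-lived TCP flows
RTT = 0.1             # assumed round-trip time, seconds
MSS = 1460            # assumed segment size, bytes
MINTH, MAXTH, PMAX = 5, 50, 0.1   # assumed RED parameters (packets)

def tcp_rate(p):
    """Square-root TCP throughput approximation, bytes/s."""
    return (MSS / RTT) * math.sqrt(1.5 / p)

def red_drop(q):
    """RED linear drop profile as a function of the average queue (packets)."""
    if q <= MINTH:
        return 0.0
    if q >= MAXTH:
        return PMAX
    return PMAX * (q - MINTH) / (MAXTH - MINTH)

def implied_drop(p):
    """Drop probability RED would produce if every flow saw drop prob p."""
    rho = min(0.999, N * tcp_rate(p) / C)   # offered load on the link
    q = rho / (1.0 - rho)                   # crude M/M/1-style queue estimate
    return red_drop(q)

def solve(lo=1e-6, hi=PMAX, iters=60):
    # Bisection on g(p) = p - implied_drop(p); implied_drop is non-increasing
    # in p, so g is increasing and has a single root in [lo, hi] here.
    for _ in range(iters):
        mid = 0.5 * (lo + hi)
        if mid - implied_drop(mid) < 0:
            lo = mid
        else:
            hi = mid
    return hi

if __name__ == "__main__":
    p = solve()
    rho = min(0.999, N * tcp_rate(p) / C)
    print(f"consistent drop prob ~ {p:.4f}, per-flow rate ~ {tcp_rate(p) / 1e3:.1f} kB/s, load ~ {rho:.3f}")
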
@Article{Low:2001:UTV,
author = "Steven H. Low and Larry Peterson and Limin Wang",
title = "Understanding {TCP Vegas}: a duality model",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "226--235",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378787",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a model of the TCP Vegas
congestion control mechanism as a distributed
optimization algorithm. Doing so has three important
benefits. First, it helps us gain a fundamental
understanding of why TCP Vegas works, and an
appreciation of its limitations. Second, it allows us
to prove that Vegas stabilizes at a weighted
proportionally fair allocation of network capacity when
there is sufficient buffering in the network. Third, it
suggests how we might use explicit feedback to allow
each Vegas source to determine the optimal sending rate
when there is insufficient buffering in the network. We
present simulation results that validate our
conclusions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Talim:2001:CRW,
author = "J. Talim and Z. Liu and Ph. Nain and E. G. {Coffman,
Jr.}",
title = "Controlling the robots of {Web} search engines",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "236--244",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378788",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Robots are deployed by a Web search engine for
collecting information from different Web servers in
order to maintain the currency of its data base of Web
pages. In this paper, we investigate the number of
robots to be used by a search engine so as to maximize
the currency of the data base without putting an
unnecessary load on the network. We adopt a
finite-buffer queueing model to represent the system.
The arrivals to the queueing system are Web pages
brought by the robots; service corresponds to the
indexing of these pages. Good performance requires that
the number of robots, and thus the arrival rate of the
queueing system, be chosen so that the indexing queue
is rarely starved or saturated. Thus, we formulate a
multi-criteria stochastic optimization problem with the
loss rate and empty-buffer probability being the
criteria. We take the common approach of reducing the
problem to one with a single objective that is a linear
function of the given criteria. Both static and dynamic
policies can be considered. In the static setting the
number of robots is held fixed; in the dynamic setting
robots may be re-activated/de-activated as a function
of the state. Under the assumption that arrivals form a
Poisson process and that service times are independent
and exponentially distributed random variables, we
determine an optimal decision rule for the dynamic
setting, i.e., a rule that varies the number of robots
in such a way as to minimize a given linear function of
the loss rate and empty-buffer probability. Our results
are compared with known results for the static case. A
numerical study indicates that substantial gains can be
achieved by dynamically controlling the activity of the
robots.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Markov decision process; queues; web robots; Web
search engines",
}
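
For the static setting, the trade-off in the entry above can be made concrete
with an M/M/1/K queue: n robots feed pages at rate n times a per-robot rate
into an indexer of rate mu with buffer K; the loss and empty-buffer
probabilities follow from the truncated-geometric stationary distribution, and
the n minimizing a weighted sum can be read off. The rates, buffer size, and
weights below are illustrative assumptions, not values from the paper.

# Static choice of the number of web robots via an M/M/1/K model.
# Per-robot page rate, indexing rate, buffer size and weights are assumptions.

LAM_PER_ROBOT = 0.8   # pages/s brought back by one robot (assumed)
MU = 5.0              # indexing service rate, pages/s (assumed)
K = 20                # indexing buffer size (assumed)
W_LOSS, W_EMPTY = 10.0, 1.0   # assumed weights of the two criteria

def mm1k_probs(lam, mu, k):
    """Stationary probabilities of an M/M/1/K queue (0..k customers)."""
    rho = lam / mu
    weights = [rho ** i for i in range(k + 1)]
    total = sum(weights)
    return [w / total for w in weights]

def cost(n_robots):
    probs = mm1k_probs(n_robots * LAM_PER_ROBOT, MU, K)
    p_empty = probs[0]        # indexer starved
    p_full = probs[-1]        # arriving pages are lost when the buffer is full
    loss_rate = n_robots * LAM_PER_ROBOT * p_full
    return W_LOSS * loss_rate + W_EMPTY * p_empty, loss_rate, p_empty

if __name__ == "__main__":
    best = min(range(1, 21), key=lambda n: cost(n)[0])
    for n in range(1, 21):
        c, loss, empty = cost(n)
        flag = "  <- best" if n == best else ""
        print(f"robots={n:2d} loss rate={loss:.4f}/s P(empty)={empty:.4f} cost={c:.4f}{flag}")
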
@Article{Smith:2001:WTI,
author = "F. Donelson Smith and F{\'e}lix Hern{\'a}ndez Campos
and Kevin Jeffay and David Ott",
title = "What {TCP\slash IP} protocol headers can tell us about
the {Web}",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "245--256",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378789",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We report the results of a large-scale empirical study
of web traffic. Our study is based on over 500 GB of
TCP/IP protocol-header traces collected in 1999 and
2000 (approximately one year apart) from the high-speed
link connecting The University of North Carolina at
Chapel Hill to its Internet service provider. We also
use a set of smaller traces from the NLANR repository
taken at approximately the same times for comparison.
The principal results from this study are: (1)
empirical data suitable for constructing traffic
generating models of contemporary web traffic, (2) new
characterizations of TCP connection usage showing the
effects of HTTP protocol improvement, notably
persistent connections ({\em e.g.}, about 50\% of web
objects are now transferred on persistent connections),
and (3) new characterizations of web usage and content
structure that reflect the influences of `banner ads,'
server load balancing, and content distribution. A
novel aspect of this study is a demonstration that a
relatively light-weight methodology based on passive
tracing of only TCP/IP headers and off-line analysis
tools can provide timely, high quality data about web
traffic. We hope this will encourage more researchers
to undertake on-going data collection and provide the
research community with data about the rapidly evolving
characteristics of web traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nahum:2001:EWA,
author = "Erich M. Nahum and Marcel-Catalin Rosu and Srinivasan
Seshan and Jussara Almeida",
title = "The effects of wide-area conditions on {WWW} server
performance",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "257--267",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378790",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "WWW workload generators are used to evaluate web
server performance, and thus have a large impact on
what performance optimizations are applied to servers.
However, current benchmarks ignore a crucial component:
how these servers perform in the environment in which
they are intended to be used, namely the wide-area
Internet. This paper shows how WAN conditions can
affect WWW server performance. We examine these effects
using an experimental test-bed which emulates WAN
characteristics in a live setting, by introducing
factors such as delay and packet loss in a controlled
and reproducible fashion. We study how these factors
interact with the host TCP implementation and what
influence they have on web server performance. We
demonstrate that when more realistic wide-area
conditions are introduced, servers exhibit very
different performance properties and scaling behaviors,
which are not exposed by existing benchmarks running on
LANs. We show that observed throughputs can give
misleading information about server performance, and
thus find that maximum throughput, or capacity, is a
more useful metric. We find that packet losses can
reduce server capacity by as much as 50 percent and
increase response time as seen by the client. We show
that using TCP SACK can reduce client response time,
without reducing server capacity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nain:2001:MMQ,
author = "Philippe Nain and Rudesindo N{\'u}{\~n}ez-Queija",
title = "A {M/M/1} queue in a semi-{Markovian} environment",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "268--278",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378791",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an M/M/1 queue in a semi-Markovian
environment. The environment is modeled by a two-state
semi-Markov process with arbitrary sojourn time
distributions $ F_0 (x) $ and $ F_1 (x) $. When in
state $ i = 0, 1 $, customers are generated according
to a Poisson process with intensity $ \lambda_i $ and
customers are served according to an exponential
distribution with rate $ \mu_i $. Using the theory of
Riemann--Hilbert boundary value problems we compute the
$z$-transform of the queue-length distribution when
either $ F_0 (x)$ or $ F_1 (x)$ has a rational
Laplace--Stieltjes transform and the other may be a
general --- possibly heavy-tailed --- distribution. The
arrival process can be used to model bursty traffic
and/or traffic exhibiting long-range dependence, a
situation which is commonly encountered in networking.
The closed-form results lend themselves for numerical
evaluation of performance measures, in particular the
mean queue-length.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "queueing; bursty traffic; communication networks;
heavy-tailed distribution; long-range dependence;
Riemann--Hilbert boundary value problem; stochastic
modeling",
}
@Article{Bansal:2001:ASS,
author = "Nikhil Bansal and Mor Harchol-Balter",
title = "Analysis of {SRPT} scheduling: investigating
unfairness",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "279--290",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378792",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Shortest-Remaining-Processing-Time (SRPT)
scheduling policy has long been known to be optimal for
minimizing mean response time (sojourn time). Despite
this fact, SRPT scheduling is rarely used in practice.
It is believed that the performance improvements of
SRPT over other scheduling policies stem from the fact
that SRPT unfairly penalizes the large jobs in order to
help the small jobs. This belief has led people to
instead adopt `fair' scheduling policies such as
Processor-Sharing (PS), which produces the same
expected slowdown for jobs of all sizes. This paper
investigates formally the problem of unfairness in SRPT
scheduling as compared with PS scheduling. The analysis
assumes an M/G/1 model, and emphasizes job size
distributions with a heavy-tailed property, as are
characteristic of empirical workloads. The analysis
shows that the degree of unfairness under SRPT is
surprisingly small. The M/G/1/SRPT and M/G/1/PS queues
are also analyzed under overload and closed-form
expressions for mean response time as a function of job
size are proved in this setting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
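
For readers who want to reproduce the kind of comparison discussed above, the
sketch below numerically evaluates the classical M/G/1 conditional mean
response times: x/(1 - rho) under Processor-Sharing and the Schrage--Miller
waiting-plus-residence expression under SRPT, for an exponential job-size
distribution. The load value, size distribution, and grid resolution are
illustrative choices; the paper's overload analysis is not covered.

# Mean conditional response time under PS and SRPT in an M/G/1 queue,
# evaluated numerically for an exponential job-size distribution.
# The load and the distribution are illustrative assumptions.
import math

RHO = 0.8          # assumed system load
MEAN_SIZE = 1.0    # assumed mean job size; arrival rate LAM = RHO / MEAN_SIZE
LAM = RHO / MEAN_SIZE
DX = 1e-3          # integration step

def f(x):                      # exponential job-size density
    return math.exp(-x / MEAN_SIZE) / MEAN_SIZE

def F(x):                      # its cumulative distribution function
    return 1.0 - math.exp(-x / MEAN_SIZE)

def srpt_response(x):
    """Schrage--Miller mean response time of a size-x job under SRPT."""
    rho_t, m2_t, residence = 0.0, 0.0, 0.0
    t = 0.0
    while t < x:
        rho_t += LAM * t * f(t) * DX           # rho(t) = lam * E[S; S <= t]
        m2_t += LAM * t * t * f(t) * DX        # lam * E[S^2; S <= t]
        residence += DX / (1.0 - rho_t)        # integral of dt / (1 - rho(t))
        t += DX
    waiting = (m2_t + LAM * x * x * (1.0 - F(x))) / (2.0 * (1.0 - rho_t) ** 2)
    return waiting + residence

def ps_response(x):
    return x / (1.0 - RHO)

if __name__ == "__main__":
    for x in (0.1, 0.5, 1.0, 2.0, 4.0):
        srpt, ps = srpt_response(x), ps_response(x)
        print(f"size {x:4.1f}: E[T]_SRPT={srpt:6.3f}  E[T]_PS={ps:6.3f}  "
              f"slowdown SRPT={srpt / x:5.2f} PS={ps / x:5.2f}")
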
@Article{Luthi:2001:IPC,
author = "Johannes L{\"u}thi and Catalina M. Llad{\'o}",
title = "Interval parameters for capturing uncertainties in an
{EJB} performance model",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "291--300",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378794",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Exact as well as approximate analytical solutions for
quantitative performance models of computer systems are
usually obtained by performing a series of arithmetical
operations on the input parameters of the model.
However, especially during early phases of system
design and implementation, not all the parameter values
are usually known exactly. In related research
contributions, intervals have been proposed as a means
to capture parameter uncertainties. Furthermore,
methods to adapt existing solution algorithms to
parameter intervals have been discussed. In this paper
we present the adaptation of an existing performance
model to parameter intervals. The approximate solution
of a queueing network modelling an Enterprise JavaBeans
server implementation is adapted to interval arithmetic
in order to represent the uncertainty in some of the
parameters of the model. A new interval splitting
method is applied to obtain reasonably tight
performance measure intervals. Monotonicity properties
of intermediate computation results are exploited to
achieve a more efficient interval solution. In
addition, parts of the original solution algorithm are
modified to increase the efficiency of the
corresponding interval arithmetical solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed systems; enterprise JavaBeans; interval
parameters; parameter uncertainties; performance
models; queueing",
}
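
As a toy illustration of carrying parameter uncertainty through a performance
formula with intervals and of why splitting helps, the sketch below evaluates
the accepted throughput of an M/M/1/1 loss system, T = lambda * (1 - rho/(1 +
rho)) with mu = 1, where lambda appears twice and naive interval arithmetic
therefore overestimates the range. The formula and the numeric ranges are
invented examples, not the EJB queueing-network model adapted in the paper.

# Toy interval-arithmetic propagation with interval splitting; the formula
# and numbers are illustrative, not the model from the paper.

def i_add(a, b): return (a[0] + b[0], a[1] + b[1])
def i_sub(a, b): return (a[0] - b[1], a[1] - b[0])
def i_mul(a, b):
    prods = [x * y for x in a for y in b]
    return (min(prods), max(prods))
def i_div(a, b):
    assert b[0] > 0 or b[1] < 0, "division interval must not contain 0"
    return i_mul(a, (1.0 / b[1], 1.0 / b[0]))

def accepted_throughput(lam):
    """Naive interval evaluation of T = lam * (1 - rho/(1+rho)) with mu = 1.
    lam appears twice, so the enclosure suffers from the dependency problem."""
    rho = lam                                   # mu = 1, hence rho = lam
    blocking = i_div(rho, i_add((1.0, 1.0), rho))
    return i_mul(lam, i_sub((1.0, 1.0), blocking))

def split_eval(lam, pieces=8):
    """Evaluate on sub-intervals and take the hull of the results."""
    lo, hi = lam
    step = (hi - lo) / pieces
    results = [accepted_throughput((lo + i * step, lo + (i + 1) * step))
               for i in range(pieces)]
    return (min(r[0] for r in results), max(r[1] for r in results))

if __name__ == "__main__":
    lam = (1.0, 2.0)                # uncertain arrival rate
    print("naive enclosure :", accepted_throughput(lam))
    print("8-way splitting :", split_eval(lam))
    print("exact range     :", (1.0 / (1.0 + 1.0), 2.0 / (1.0 + 2.0)))
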
@Article{El-Sayed:2001:ASS,
author = "Hesham El-Sayed and Don Cameron and Murray Woodside",
title = "Automation support for software performance
engineering",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "301--311",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378799",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To evaluate the performance of a software design one
must create a model of the software, together with the
execution platform and configuration. Assuming that the
`platform': (processors, networks, and operating
systems) are specified by the designer, a good
`configuration' (the allocation of tasks to processors,
priorities, and other aspects of the installation) must
be determined. Finding one may be a barrier to rapid
evaluation; it is a more serious barrier if there are
many platforms to be considered. This paper describes
an automated heuristic procedure for configuring a
software system described by a layered architectural
software model, onto a set of processors, and choosing
priorities. The procedure attempts to meet a
soft-real-time performance specification, in which any
number of scenarios have deadlines which must be
realized some percentage of the time. It has been
successful in configuring large systems with both soft
and hard deadlines.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bradshaw:2001:PBP,
author = "Michael K. Bradshaw and Bing Wang and Subhabrata Sen
and Lixin Gao and Jim Kurose and Prashant Shenoy and
Don Towsley",
title = "Periodic broadcast and patching services:
implementation, measurement, and analysis in an
{Internet} streaming video testbed",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "312--313",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378801",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2001:TSR,
author = "Yang Richard Yang and Xiaozhou Li and Simon S. Lam and
Xincheng Zhang",
title = "Towards scalable and reliable group key management",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "314--315",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378803",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bremler-Barr:2001:RPC,
author = "Anat Bremler-Barr and Yehuda Afek and Haim Kaplan and
Edith Cohen and Michael Merritt",
title = "Restoration path concatenation: fast recovery of
{MPLS} paths",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "316--317",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new general theory about {\em restoration\/} of
network paths is first introduced. The theory pertains
to restoration of shortest paths in a network following
failure, e.g., we prove that a shortest path in a
network after removing $k$ edges is the concatenation
of at most $k$ + 1 shortest paths in the original
network. The theory is then combined with efficient
path concatenation techniques in MPLS (multi-protocol
label switching), to achieve powerful schemes for
restoration in MPLS based networks. We thus transform
MPLS into a flexible and robust method for forwarding
packets in a network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Savvides:2001:MNW,
author = "Andreas Savvides and Sung Park and Mani B.
Srivastava",
title = "On modeling networks of wireless microsensors",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "318--319",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378808",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsigas:2001:EPN,
author = "Philippas Tsigas and Yi Zhang",
title = "Evaluating the performance of non-blocking
synchronization on shared-memory multiprocessors",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "320--321",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378810",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel programs running on shared memory
multiprocessors coordinate via shared data
objects/structures. To ensure the consistency of the
shared data structures, programs typically rely on some
forms of software synchronisations. Unfortunately
typical software synchronisation mechanisms usually
result in poor performance because they produce large
amounts of memory and interconnection network
contention and, more significantly, because they
produce convoy effects that degrade performance significantly in
multiprogramming environments: if one process holding a
lock is preempted, other processes on different
processors waiting for the lock will not be able to
proceed. Researchers have introduced non-blocking
synchronisation to address the above problems.
Non-blocking implementations allow multiple tasks to
access a shared object at the same time, but without
enforcing mutual exclusion to accomplish this. However,
its performance implications are not well understood on
modern systems or on real applications. In this paper
we study the impact of the non-blocking synchronisation
on parallel applications running on top of a modern, 64
processor, cache-coherent, shared memory multiprocessor
system: the SGI Origin 2000. Cache-coherent non-uniform
memory access (ccNUMA) shared memory multiprocessor
systems have attracted considerable research and
commercial interest in the last years. In addition to
the performance results on a modern system, we also
investigate the key synchronisation schemes that are
used in multiprocessor applications and their efficient
transformation to non-blocking ones. Evaluating the
impact of the synchronisation performance on
applications is important for several reasons. First,
micro-benchmarks can not capture every aspect of
primitive performance. It is hard to predict the
primitive impact on the application performance. For
example, a lock or barrier that generates a lot of
additional network traffic might have little impact on
applications. Second, even in applications that spend
significant time in synchronisation operations, the
synchronisation time might be dominated by wait time
due to load imbalance and lock serialisation in the
application, which better implementations of
synchronisation may not be helpful in reducing. Third,
micro-benchmarks rarely capture (generate) scenarios
that occur in real applications.\par
We evaluated the benefits of non-blocking
synchronisation in a range of applications running on
top of modern realizations of shared-memory
multiprocessors, a 64 processor SGI Origin 2000. In
this evaluation, (i) we used a big set of applications
with different communication characteristics, making
sure that we include also applications that do not
spend a lot of time in synchronisation, (ii) we also
modified all the lock-based synchronisation points of
these applications when possible. The goal of our work
was to provide an in depth understanding of how
non-blocking can improve the performance of modern
parallel applications. More specifically, the main
issues addressed in this paper include: (i) The
architectural implications of the ccNUMA on the design
of non-blocking synchronisation. (ii) The
identification of the basic locking operations that
parallel programmers use in their applications. (iii)
The efficient non-blocking implementation of these
synchronisation operations. (iv) The experimental
comparison of the lock-based and lock-free versions of
the respective applications on a cache-coherent
non-uniform memory access shared memory multiprocessor
system. (v) The identification of the structural
differences between applications that benefit more from
non-blocking synchronisation than others. We chose to
examine these issues on a 64 processor SGI Origin
2000 multiprocessor system. This machine is attractive
for the study because it provides an aggressive
communication architecture and support for both in
cache and at memory synchronisation primitives. It
should be clear however that the conclusions and the
methods presented in this paper have general
applicability in other realizations of cache-coherent
non-uniform memory access machines. Our results can
benefit the parallel programmers in two ways. First, to
understand the benefits of non-blocking
synchronisation, and then to transform some typical
lock-based synchronisation operations that are probably
used in their programs to non-blocking ones by using
the general translations that we provide in this
paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ng:2001:OHP,
author = "Wee Teck Ng and Bruce K. Hillyer",
title = "Obtaining high performance for storage outsourcing",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "322--323",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378813",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The viability of storage outsourcing is critically
dependent on the access performance of remote storage.
We study this issue by measuring the behavior of a
broad variety of I/O-intensive benchmarks as they
access remote storage over an IP network. We measure
the effect of network latencies that correspond to
distances ranging from a local neighborhood to halfway
across a continent. We then measure the effect of
latency-hiding mechanisms. Our results indicate that,
in many cases, the adverse effects of network delay can
be rendered inconsequential by clever file system and
operating system techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Padmanabhan:2001:DGL,
author = "Venkata N. Padmanabhan and Lakshminarayanan
Subramanian",
title = "Determining the geographic location of {Internet}
hosts",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "324--325",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378814",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the problem of determining the geographic
location of an Internet host knowing only its IP
address. We have developed three distinct techniques,
{\em GeoTrack}, {\em GeoPing}, and {\em GeoCluster}, to
address this problem. These techniques exploit
information derived from the DNS, network delay
measurements, and inter-domain routing. We have
evaluated our techniques using extensive and varied
datasets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mandjes:2001:LCA,
author = "Michel Mandjes and Iraj Saniee and Alexander Stolyar",
title = "Load characterization and anomaly detection for voice
over {IP} traffic",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "326--327",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378816",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of traffic anomaly detection
in IP networks. Traffic anomalies arise when there is
overload due to failures in a network. We present
general formulae for the variance of the cumulative
traffic over a fixed time interval and show how the
derived analytical expression simplifies for the case
of voice over IP traffic, the focus of this paper. To
detect load anomalies, we show it is sufficient to
consider cumulative traffic over relatively long
intervals such as 5 minutes. This approach
substantially extends the current practice in IP
network management where only the first order
statistics and fixed thresholds are used to identify
abnormal behavior.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "SNMP-based load characterization; variance estimation;
VoIP traffic anomaly detection",
}
@Article{Downey:2001:SCF,
author = "Allen B. Downey",
title = "The structural cause of file size distributions",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "328--329",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a user model that explains the shape of the
distribution of file sizes in local file systems and in
the World Wide Web. We examine evidence from 562 file
systems, 38 web clients and 6 web servers, and find
that the model is a good description of these systems.
These results cast doubt on the widespread view that
the distribution of file sizes is long-tailed and that
long-tailed distributions are the cause of
self-similarity in the Internet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file sizes; long-tailed distributions;
self-similarity",
}
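
The kind of user model referred to above can be illustrated with a small
simulation: new files are created by picking an existing file and multiplying
its size by a random factor, which pushes the size distribution toward a
lognormal shape rather than a heavy tail. The starting size, factor
distribution, and file count below are illustrative assumptions, not Downey's
fitted parameters.

# Toy multiplicative file-creation model in the spirit of the entry above;
# all numeric parameters are illustrative, not fitted values from the paper.
import math
import random

def simulate(n_files=100_000, seed=2):
    rng = random.Random(seed)
    sizes = [4096.0]                            # assumed size of the first file, bytes
    for _ in range(n_files - 1):
        parent = rng.choice(sizes)              # copy/derive from an existing file
        factor = math.exp(rng.gauss(0.0, 1.0))  # assumed multiplicative edit factor
        sizes.append(parent * factor)
    return sizes

if __name__ == "__main__":
    sizes = simulate()
    logs = [math.log(s) for s in sizes]
    mu = sum(logs) / len(logs)
    sigma = (sum((x - mu) ** 2 for x in logs) / len(logs)) ** 0.5
    # For an exactly lognormal sample, about 95% of log-sizes would fall
    # within mu +/- 2*sigma; the model produces something close to that.
    inside = sum(1 for x in logs if abs(x - mu) <= 2 * sigma) / len(logs)
    print(f"log-size mean={mu:.2f} std={sigma:.2f}; within 2 sigma: {inside:.1%}")
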
@Article{Bhargava:2001:UAM,
author = "Rishi Bhargava and Ashish Goel and Adam Meyerson",
title = "Using approximate majorization to characterize
protocol fairness",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "330--331",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378826",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mellor-Crummey:2001:PUI,
author = "John Mellor-Crummey and Robert Fowler and David
Whalley",
title = "On providing useful information for analyzing and
tuning applications",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "332--333",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Application performance tuning is a complex process
that requires correlating many types of information
with source code to locate and analyze performance
bottlenecks. Existing performance tools don't
adequately support this process in one or more
dimensions. We describe two performance tools, {\em
MHsim\/} and {\em HPCView}, that we built to support
our own work on data layout and optimizing compilers.
Both tools report their results in scope-hierarchy
views of the corresponding source code and produce
their output as HTML databases that can be analyzed
portably and collaboratively using a commodity
browser.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shahabi:2001:ATE,
author = "Cyrus Shahabi and Mohammad R. Kolahdouzan and Greg
Barish and Roger Zimmermann and Didi Yao and Kun Fu and
Lingling Zhang",
title = "Alternative techniques for the efficient acquisition
of haptic data",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "334--335",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378830",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Immersive environments are those that surround users
in an artificial world. These environments consist of a
composition of various types of immersidata: unique
data types that are combined to render a virtual
experience. Acquisition, for storage and future
querying, of information describing sessions in these
environments is challenging because of the real-time
demands and sizable amounts of data to be managed. In
this paper, we summarize a comparison of techniques for
achieving the efficient acquisition of one type of
immersidata, the haptic data type, which describes the
movement, rotation, and force associated with
user-directed objects in an immersive environment. In
addition to describing a general process for real-time
sampling and recording of this type of data, we propose
three distinct sampling strategies: fixed, grouped, and
adaptive. We conducted several experiments with a real
haptic device and found that there are tradeoffs
between the accuracy, efficiency, and complexity of
implementation for each of the proposed techniques.
While it is possible to use any of these approaches for
real-time haptic data acquisition, we found that an
adaptive sampling strategy provided the most efficiency
without significant loss in accuracy. As immersive
environments become more complex and contain more
haptic sensors, techniques such as adaptive sampling
can be useful for improving scalability of real-time
data acquisition.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "haptic data acquisition; immersidata; immersive
technologies; sampling",
}
@Article{Dinda:2001:OPR,
author = "Peter A. Dinda",
title = "Online prediction of the running time of tasks",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "336--337",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378836",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Almeida:2001:ARB,
author = "Virgil{\'\i}o Almeida and Daniel Menasc{\'e} and
Rudolf Riedi and Fl{\'a}via Peligrinelli and Rodrigo
Fonseca and Wagner {Meira, Jr.}",
title = "Analyzing robot behavior in e-business sites",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "338--339",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378838",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Almeida:2001:CUA,
author = "Jussara M. Almeida and Jeffrey Krueger and Mary K.
Vernon",
title = "Characterization of user access to streaming media
files",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "340--341",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bonald:2001:PME,
author = "Thomas Bonald and James Roberts",
title = "Performance modeling of elastic traffic in overload",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "342--343",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While providers generally aim to avoid congestion by
adequate provisioning, overload can clearly occur on
certain network links. In this paper we propose some
simple preliminary models for an overloaded link
accounting for user impatience and reattempt
behavior.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qiu:2001:FFI,
author = "Lili Qiu and George Varghese and Subhash Suri",
title = "Fast firewall implementations for software-based and
hardware-based routers",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "344--345",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378849",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Routers must perform packet classification at high
speeds to efficiently implement functions such as
firewalls and diffserv. Classification can be based on
an arbitrary number of fields in the packet header.
Performing classification quickly on an arbitrary
number of fields is known to be difficult, and has poor
worst-case complexity. In this paper, we re-examine two
basic mechanisms that have been dismissed in the
literature as being too inefficient: backtracking
search and set pruning tries. We find using real
databases that the time for backtracking search is much
better than the worst-case bound; instead of $ \Omega
((\log N)^{k - 1}) $, the search time is only roughly
twice the optimal search time. Similarly, we find that
set pruning tries (using a DAG optimization) have much
better storage costs than the worst-case bound. We also
propose several new techniques to further improve the
two basic mechanisms. Our major ideas are (i)
backtracking search on a small memory budget, (ii) a
novel compression algorithm, (iii) pipelining the
search, (iv) the ability to trade off smoothly between
backtracking and set pruning, and (v) algorithms to
effectively make use of hardware if hardware is
available. We quantify the performance gain of each
technique using real databases. We show that on real
firewall databases our schemes, with the accompanying
optimizations, are close to optimal in time and
storage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kant:2001:CRT,
author = "K. Kant and Prasant Mohapatra",
title = "Current research trends in {Internet} servers",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "5--7",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572318",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dalal:2001:OSO,
author = "Amy Csizmar Dalal and Scott Jordan",
title = "An optimal service ordering for a {World Wide Web}
server",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "8--13",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572319",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider alternative service policies in a web
server with impatient users. User-perceived performance
is modeled as an exponentially decaying function of the
user's waiting time, reflecting the probability that
the user aborts the download before the page is
completely received. The web server is modeled as a
single server queue, with Poisson arrivals and
exponentially distributed file lengths. The server
objective is to maximize average revenue per unit time,
where each user is assumed to pay a reward proportional
to the perceived performance. When file lengths are
i.i.d., we prove that the optimal service policy is
greedy, namely that the server should choose the job
with the highest potential reward. However, when file
lengths are independently drawn from a set of
exponential distributions, we show the optimal policy
need not be greedy; in fact, processor sharing policies
sometimes outperform the best greedy policy in this
case.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cardellini:2001:WSS,
author = "Valeria Cardellini and Emiliano Casalicchio and
Michele Colajanni and Marco Mambelli",
title = "{Web} switch support for differentiated services",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "14--19",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572320",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As the Web is becoming a medium widely used as a
preferential channel for critical information exchange,
business, and e-commerce, it is necessary to enable
differentiated service mechanisms not only at the
network but also at the Web server level. In this
paper, we propose the concept of {\em Quality of Web
Services\/} (QoWS), which is inspired by the basic
principles of network QoS, while looking at the server
components of the Web system. In particular, we analyze
how QoWS principles can be realized in a Web site
hosted on a Web-server cluster, that is, an architecture
composed of multiple locally distributed Web servers
and a single front-end node, called a Web switch. We
propose a new centralized policy, namely {\em
DynamicPartitioning}, which satisfies, through dynamic
server partitioning, all basic QoS principles for a Web
switch working at the application level. We compare it
against other proposed classes of policies which
implement some or all of the basic QoS principles. We
demonstrate through a large set of simulation
experiments under a realistic workload model that
DynamicPartitioning always achieves superior
performance for the high service class, at the price of
some penalty for low service classes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed systems; load sharing; performance
evaluation; quality of service",
}
@Article{Voigt:2001:KBC,
author = "Thiemo Voigt and Per Gunningberg",
title = "Kernel-based control of persistent {Web} server
connections",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "20--25",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several overload admission control architectures have
been developed to protect web servers from overload.
Some of these architectures base their admission
decision on information found in the HTTP header. In
this context, persistent connections represent a
challenging problem since the HTTP header of the first
request does not reveal any information about the
resource consumption of the requests that might follow
on the same connection. In this paper, we present an
architecture that prevents uncontrollable server
overload caused by persistent connections. We evaluate
our approach by various experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2001:BPI,
author = "Jun Wang and Rui Min and Zhuying Wu and Yiming Hu",
title = "Boosting {I/O} performance of {Internet} servers with
user-level custom file systems",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "26--31",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Previous studies have shown that disk I/O times are
one of the major performance bottlenecks of Internet
servers such as proxy cache servers. Most conventional
file systems do not work well for such systems because
of their very high overheads. Although special-purpose
operating systems may achieve high performance, they are
very difficult and expensive to design and maintain,
and they have very poor portability. In this paper we
propose to build user-space, customized file systems
for Internet servers so as to achieve high performance,
low implementation cost, and good portability at the
same time. To provide an example of such systems, we
present a novel scheme called {\em WPSFS\/} that can
drastically improve I/O performance of proxy servers
and other applications. WPSFS is an application-level
software component of a proxy server which manages data
on a raw disk or disk partition. Since the entire
system runs in the user space, it is easy and
inexpensive to implement. It also has good portability
and maintainability. With efficient in-memory meta-data
data structures and a novel file system called {\em
Page-structured file system (PFS)}, WPSFS achieves 9-20
times better I/O performance than the state-of-the-art
SQUID server running on a Unix Fast File System, and
4-10 times better than the improved SQUIDML.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2001:CDP,
author = "Xin Chen and Xiaodong Zhang",
title = "Coordinated data prefetching by utilizing reference
information at both proxy and {Web} servers",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "32--38",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Existing prefetching techniques rely on server-based,
proxy-based, or client-based reference access
information. Although Web servers may provide accurate
access information, our studies show that significant
communication overhead can be incurred by sending
unnecessary reference information to clients and/or
proxy servers. Our study also shows that the prediction
accuracy of proxy-based prefetching can be
significantly limited without input from Web servers. We
propose a {\em coordinated proxy-server prefetching
technique\/} that adaptively utilizes the reference
information and coordinates prefetching activities at
both proxy and web servers. In our design, the
reference access information stored in proxy servers
will be the main source serving data prefetching for
groups of clients, each of which shares common
surfing interests. The access information in the web
server will be used to serve data prefetching only for
data objects that are not qualified for proxy-based
prefetching. Conducting trace-driven simulations, we
show that both hit ratios and byte hit ratios
contributed from coordinated proxy-server prefetching
are up to 88\% higher than those from proxy-based
prefetching, and they are comparable to the ratios from
server-based prefetching with a difference of 5\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ardaiz:2001:IST,
author = "Oscar Ardaiz and Felix Freitag and Leandro Navarro",
title = "Improving the service time of {Web} clients using
server redirection",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "39--44",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572324",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes and evaluates experimentally a
web server infrastructure, which consists of a small
number of servers that redirect client requests based
on the estimated client service time. The web servers
have replicated content, are located in geographically
different regions, and redirect clients between
servers. The web servers use metrics obtained from
server logs to estimate the service time of a client.
Based on the estimated service time the server
redirects the web client. The implementation of the
measurement and redirection mechanism is done in the
web servers and is independent of the clients. Because it uses
server logs, the measuring mechanism does not introduce
traffic into the network. We have experimentally
evaluated the proposed web server infrastructure. In
our experiments the client service time improved by 4\%
to 40\% when using the proposed mechanism. The web
server infrastructure could be applied to improve the
service time of selected clients, which frequently
access a web server to retrieve a significant amount of
data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jin:2001:GGI,
author = "Shudong Jin and Azer Bestavros",
title = "{GISMO}: a {Generator of Internet Streaming Media
Objects} and workloads",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "2--10",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a tool called GISMO (Generator of
Internet Streaming Media Objects and workloads). GISMO
enables the specification of a number of streaming
media access characteristics, including object
popularity, temporal correlation of requests, seasonal
access patterns, user session durations, user
interactivity times, and variable bit-rate (VBR)
self-similarity and marginal distributions. The
embodiment of these characteristics in GISMO enables
the generation of realistic and scalable request
streams for use in the benchmarking and comparative
evaluation of Internet streaming media delivery
techniques. To demonstrate the usefulness of GISMO, we
present a case study that shows the importance of
various workload characteristics in determining the
effectiveness of proxy caching and server patching
techniques in reducing bandwidth requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2001:SIWb,
author = "Mark S. Squillante",
title = "Special issue on the {Workshop on MAthematical
performance Modeling and Analysis (MAMA 2001)}",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "11--11",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bansal:2001:AMG,
author = "Nikhil Bansal and Mor Harchol-Balter",
title = "Analysis of {M/G/1/SRPT} under transient overload",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "12--14",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This short paper contains an approximate analysis for
the M/G/1/SRPT queue under alternating periods of
overload and low load. The result in this paper, along
with several other results on systems under transient
overload, is contained in our recent technical report
[2].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2001:ACA,
author = "E. Bachmat",
title = "Average case analysis for batched disk scheduling and
increasing subsequences",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "15--16",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riabov:2001:SPT,
author = "Anton Riabov and Jay Sethuraman",
title = "Scheduling periodic task graphs with communication
delays",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "17--18",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507559",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of finding an optimal
assignment of tasks, which constitute a parallel
application, to an unlimited number of identical
processors. The precedence constraints among the tasks
are given in the form of a directed acyclic graph
(DAG). We are given processing times for each task and
the communication delays between precedence-constrained
tasks, which are incurred if the corresponding tasks
are executed on different processors. Furthermore, the
system must be able to process real-time periodic input
with a fixed period. This problem occurs, for example,
in multiprocessor scheduling of video processing
applications, where each frame has to be processed by a
number of software filters, and some filters use data
pre-processed by other filters, thus forming a DAG of
data dependencies. We formulate several variants of
this problem, and briefly discuss some of our results
for special precedence graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fourneau:2001:GNR,
author = "Jean-Michel Fourneau and Erol Gelenbe",
title = "{G}-networks with resets",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "19--20",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gelenbe Networks (G-networks) are a class of queuing
models which include new types of customers called
`signals,' which are either `negative customers' and
`triggers' [1, 2]. Queuing networks typically do not
have provisions for some customers being used to
eliminate other customers, or to redirect other
customers among the queues. In other words, customers
in traditional queuing networks cannot exert direct
control on other customers. G-network models overcome
some of these limitations and still preserve the
computationally attractive `product form' property of
certain Markovian queuing networks. In addition to
ordinary customers, G-networks contain `negative
customers' which eliminate normal customers, and
`triggers' which move other customers from some queue
to another [4, 5]. Multiple class versions of these
models are discussed in [7, 8], and in [9] many
additional results are provided. These queuing networks
have generated much interest in the literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shalmon:2001:QAP,
author = "Michael Shalmon",
title = "Queueing analysis for polling and prioritized service
of aggregated regenerative variable rate {ON-OFF}
traffic sources",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "20--20",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507561",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bain:2001:MPD,
author = "Alan Bain and Peter Key",
title = "Modelling the performance of distributed admission
control for adaptive applications",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "21--22",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507562",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2001:LBB,
author = "Cheng-Shang Chang and Duan-Shin Lee and Ching-Ming
Lien",
title = "Load balanced {Birkhoff--von Neumann} switches with
resequencing",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "23--24",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507563",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In [2], we proposed the load balanced Birkhoff--von
Neumann switch with one-stage buffering (see Figure 1).
Such a switch consists of two stages of crossbar
switching fabrics and one stage of buffering. The
buffer at the input port of the second stage uses the
Virtual Output Queueing (VOQ) technique to solve the
problem of head-of-line blocking. In such a switch,
packets are of the same size. Also, time is slotted and
synchronized so that exactly one packet can be
transmitted within a time slot. In a time slot, both
crossbar switches set up connection patterns
corresponding to permutation matrices that are
periodically generated from a one-cycle permutation
matrix.\par
The reasoning behind such a switch architecture is as
follows: since the connection patterns are periodic,
packets from the same input port of the first stage are
distributed in a round-robin fashion to the second
stage according to their arrival times. Thus, the first
stage performs load balancing for the incoming traffic.
As the traffic coming into the second stage is load
balanced, it suffices to use simple periodic connection
patterns to perform switching at the second stage. This
is shown in [2] as a special case of the original
Birkhoff--von Neumann decomposition used in [1]. There
are several advantages of using such an architecture,
including scalability, low hardware complexity, 100\%
throughput, low average delay in heavy load and bursty
traffic, and efficient buffer usage. However, the main
drawback of the load balanced Birkhoff--von Neumann
switch with one-stage buffering is that packets might
be out of sequence.\par
The main objective of this paper is to solve the
out-of-sequence problem that occurs in the load
balanced Birkhoff--von Neumann switch with one-stage
buffering. One quick fix is to add a
resequencing-and-output buffer after the second stage.
However, as packets are distributed according to their
{\em arrival times\/} at the first stage, there is no
guarantee on the size of the resequencing-and-output
buffer to prevent packet losses. For this, one needs to
distribute packets according to their {\em flows}, as
indicated in the paper by Iyer and McKeown [5]. This is
done by adding a flow splitter and a load-balancing
buffer in front of the first stage (see Figure 2). For
an $ N \times N $ switch, the load-balancing buffer at
each input port of the first stage consists of $N$
virtual output queues (VOQ) destined for the $N$ output
ports of that stage. Packets from the same {\em flow\/}
are split in a round-robin fashion to the $N$ virtual
output queues and scheduled under the First Come First
Served (FCFS) policy. By so doing, load balancing can
be achieved for each flow as packets from the same flow
are split almost evenly to the input ports of the
second stage. More importantly, as pointed out in [5],
the delay and the buffer size of the load-balancing
buffer are bounded by constants that only depend on the
size of the switch and the number of flows. The
resequencing-and-output buffer after the second stage
not only performs resequencing to keep packets in
sequence, but also stores packets waiting for
transmission from the output links.\par
In this paper, we consider a traffic model with
multicasting flows. This is a more general model than
the point-to-point traffic model in [5]. A multicasting
flow is a stream of packets that has one common input and
a set of common outputs. For the multicasting flows,
fanout splitting (see e.g., [4]) is performed at the
central buffers (the VOQ in front of the second stage).
The central buffers are assumed to be infinite so that
no packets are lost in the switch. We consider two
types of scheduling policies in the central buffers:
the FCFS policy and the Earliest Deadline First (EDF)
policy. For the FCFS policy, a jitter control
mechanism, is added in the VOQ in front of the second
stage. Such a jitter control mechanism delays every
packet to its maximum delay at the first stage so that
the flows entering the second stage are simply
time-shifted flows of the original ones. Our main
result for the FCFS scheme with jitter controls is the
following theorem. The proof of Theorem 1 is shown in
the full report [3].\par
Theorem 1: Suppose that all the buffers are empty at
time 0. Then the following hold for the FCFS scheme with
jitter control.\par
(i) The end-to-end delay for a packet through our
switch with multi-stage buffering is bounded above by
the sum of the delay through the corresponding FCFS
output-buffered switch and $ N L_{\rm max} + (N + 1)
M_{\rm max}$, where $ L_{\rm max}$ (resp. $ M_{\rm
max}$) is the maximum number of flows at an input
(resp. output) port.\par
(ii) The load-balancing buffer at an input port of the
first stage is bounded above by $ N L_{\rm
max}$.\par
(iii) The delay through the load-balancing buffer at an
input port of the first stage is bounded above by $ N
L_{\rm max}$.\par
(iv) The resequencing-and-output buffer at an output
port of the second stage is bounded above by $ (N + 1)
M_{\rm max}$.\par
In the EDF scheme (see Figure 3), every packet is
assigned a deadline that is the departure time from the
corresponding FCFS output-buffered switch. Packets are
scheduled according to their deadlines in the central
buffers. For the EDF scheme, there is no need to
implement the jitter control mechanism in the FCFS
scheme. As such, average packet delay can be greatly
reduced. However, as there is no jitter control, one
might need a larger resequencing buffer than that in
the FCFS scheme with jitter control. Since the first
stage is the same as that in the FCFS scheme, both the
delay and the buffer size of the load-balancing buffer
are still bounded by $ N L_{\rm max}$. Moreover, we
show the following theorem for the EDF scheme. Its
proof is given in the full report [3].\par
Theorem 2: Suppose that all the buffers are empty at
time 0. Then the following hold for the EDF
scheme.\par
(i) The end-to-end delay for a packet through our
switch with multi-stage buffering is bounded above by
the sum of the delay through the corresponding FCFS
output-buffered switch and $ N (L_{\rm max} + M_{\rm
max})$.\par
(ii) The resequencing-and-output buffer at an output
port of the second stage is bounded above by $ N (L_{\rm
max} + M_{\rm max})$.\par
Computing the departure times from the corresponding
FCFS output-buffered switch requires global information from
all the inputs. A simple way is to use the packet
arrival times as deadlines. Then the EDF scheme based
on arrival times yields the same departure order except
for packets that arrive at the same time. Since there
are at most $ M_{\rm max}$ packets that can arrive at
the same time to an output port of the corresponding
output-buffered switch, the end-to-end delay for a
packet through the multi-stage switch using arrival
times as deadlines is bounded above by the sum of the
delay through the corresponding FCFS output-buffered
switch and $ N L_{\rm max} + (N + 1) M_{\rm max}$.
Also, the resequencing-and-output buffer at an output
port of the second stage in this case is bounded above by
$ N L_{\rm max} + (N + 1) M_{\rm max}$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kogan:2001:AEP,
author = "Yaakov Kogan",
title = "Asymptotic expansions for probability distributions in
large loss and closed queueing networks",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "25--27",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507564",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Using integral representation in complex space and the
saddle point method asymptotic expansions for
probability distributions are derived for the
generalised Engset model and a closed queueing network
with multiple classes. The results can be applied to
bandwidth engineering and admission control in data
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baryshnikov:2001:KLM,
author = "Yuliy Baryshnikov and E. G. {Coffman, Jr.} and Predrag
Jelenkovi{\'c}",
title = "{Kelly}'s {LAN} model revisited",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "28--29",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For a given $ k \geq 1 $, subintervals of a given
interval $ [0, X] $ arrive at random and are accepted
(allocated) so long as they overlap fewer than $k$
subintervals already accepted. Subintervals not
accepted are cleared, while accepted subintervals
remain allocated for random retention times before they
are released and made available to subsequent arrivals.
Thus, the system operates as a generalized many-server
queue under a loss protocol. We study a discretized
version of this model that appears in reference
theories for a number of applications; the one of most
interest here is linear communication networks, a model
originated by Kelly [2]. Other applications include
surface adsorption/desorption processes and reservation
systems [3, 1].\par
The interval $ [0, X]$, $X$ an integer, is subdivided
by the integers into slots of length $1$. An {\em
interval\/} is always composed of consecutive slots,
and a configuration $C$ of intervals is simply a finite
set of intervals in $ [0, X]$. A configuration $C$ is
{\em admissible\/} if every non-integer point in $ [0,
X]$ is covered by at most $k$ intervals in $C$. Denote
the set of admissible configurations on the interval $
[0, X]$ by $ C_X$. Assume that, for any integer point
$i$, intervals of length $l$ with left endpoint $i$
arrive at rate $ \lambda_l$; the arrivals of intervals
at different points and of different lengths are
independent. A newly arrived interval is included in
the configuration if the resulting configuration is
admissible; otherwise the interval is rejected. It is
convenient to assume that the arrival rates $
\lambda_l$ vanish for all but a finite number of
lengths $l$, say $ \lambda_l > 0$, $ 1 \leq l \leq L$,
and $ \lambda_l = 0$ otherwise.\par
The departure of intervals from configurations has a
similar description: the flow of `killing' signals for
intervals of length $l$ arrives at each integer $i$ at
rate $ \mu_l$. If at the time such a signal arrives,
there is at least one interval of length $l$ with its
left endpoint at $i$ in the configuration, then one of
them leaves.\par
Our primary interest is in steady-state estimates of
the vacant space, i.e., the total length of available
subintervals $ k X - \sum l_i$, where the $ l_i$ are
the lengths of the subintervals currently allocated. We
obtain explicit results for $ k = 1$ and for general
$k$ with all subinterval lengths equal to 2, the
classical {\em dimer\/} case of chemical applications.
Our analysis focuses on the asymptotic regime of large
retention times, and brings out an apparently new,
broadly useful technique for extracting asymptotic
behavior from generating functions in two
dimensions.\par
Our model, as proposed by Kelly [2], arises in a study
of one-dimensional communication networks (LAN's). In
this application, intervals correspond to the circuits
connecting communicating parties and $ [0, X]$
represents the bus. Kelly's main results apply to the
case $ k = 1$ and to the case of general $k$ with
interval lengths governed by a geometric law.\par
The focus here is on space utilization, so the results
here add to the earlier theory in three principal ways.
First, we give expected vacant space for $ k = 1$, with
special emphasis on small-$ \mu $ asymptotics. Behavior
in this regime is quite different from that seen in the
`jamming' limit (absorbing state) of the pure filling
model (all $ \mu $'s are identically 0). Second, the
important dimer case of chemical applications, where
all intervals have length 2, is covered. Finally, the
approach of the analysis itself appears to be new and
to hold promise for the analysis of similar Markov
chains. In very broad terms, expected vacant space is
expressed in terms of the geometric properties of a
certain plane curve defined by a bivariate generating
function.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2001:SOB,
author = "David Gamarnik",
title = "Stochastic online binpacking problem: exact conditions
for bounded expected queue lengths under the best fit
packing heuristic",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "30--31",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507566",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the following stochastic bin packing
process: the items of different sizes arrive at times
$ t = 0, 1, 2, \ldots $ and are packed into unit-size
bins using the `largest first' rule. The unpacked items
form queues. Coffman and Stolyar [3] introduced this
system and posed the following question: under which
conditions are expected queue lengths bounded (i.e., the system
is stable)? We provide exact, computable conditions for
stability of this system using a Lyapunov function
technique. The result holds for a very general class of
distributions of the arrival processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lam:2001:SCS,
author = "S. Lam and Rocky K. C. Chang",
title = "Stability comparison in single-server-multiple-queue
systems",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "32--34",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study stability comparison among
queues in single-server-multiple-queue systems. We
establish a trichotomy between two queues in terms of
stability. We introduce a concept of degree of
instability which reflects the stability level of an
individual queue. By comparing the degrees of
instability of two queues, we give conditions under
which two queues are as stable as each other, and under
which one queue is more (or less) stable than the other. We also
generalize previous results regarding stability
ranking or stability ordering, and accommodate them
within our general form.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Szlavik:2001:GGT,
author = "{\'A}rp{\'a}d Szl{\'a}vik",
title = "{GI/G/1} type processes: a non-inversive matrix
analytical solution",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "35--37",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new general solution method is derived for the
general {GI/G/1} type processes --- for the
steady-state distribution of infinite block-structured
Markov chains with repetitive structure. While matrix
inversion is needed in each iteration of other
general (and of more specialized) matrix analytical
procedures, the method presented here uses matrix
addition and matrix multiplication only. In exchange,
the computational complexity and the memory requirement
grow with each iteration of the proposed
method. This paper, however, gives priority to the
theoretical aspects of the general solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boots:2001:STP,
author = "Nam Kyoo Boots and Perwez Shahabuddin",
title = "Simulating tail probabilities in {GI/GI/1} queues and
insurance risk processes with subexponential
distributions (extended abstract)",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "38--39",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borst:2001:GPS,
author = "Sem Borst and Michel Mandjes and Miranda van Uitert",
title = "Generalized processor sharing with heterogeneous
traffic classes",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "40--42",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a system with two heterogeneous traffic
classes, one having light-tailed characteristics, the
other one exhibiting heavy-tailed properties. The two
traffic classes are served in accordance with the
Generalized Processor Sharing (GPS) discipline.
GPS-based scheduling algorithms, such as Weighted Fair
Queueing (WFQ), have emerged as an important mechanism
for achieving service differentiation in
integrated-services networks. We determine the workload
asymptotics of the light-tailed class for the situation
where its GPS weight is larger than its traffic
intensity. The GPS mechanism ensures that the workload
is bounded above by that in an isolated system with the
light-tailed class served in isolation at a constant
rate equal to its GPS weight. We show that the workload
distribution is in fact asymptotically equivalent to
that in the isolated system, multiplied with a certain
pre-factor, which accounts for the interaction with the
heavy-tailed class. Specifically, the pre-factor
represents the probability that the heavy-tailed class
is backlogged long enough for the light-tailed class to
reach overflow. The results provide crucial qualitative
insight into the typical overflow scenario.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2001:MSL,
author = "Zhen Liu and Mark S. Squillante and Joel L. Wolf",
title = "On maximizing service-level-agreement profits",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "43--44",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present an initial study of a methodology for
maximizing profits in a general class of e-commerce
environments under a cost model in which revenues are
generated when QoS guarantees are satisfied and
penalties are incurred otherwise. The QoS guarantees
are based on multiclass SLAs between service providers
and their clients, which include the tail distributions
of the per-class response times. Our approach consists
of formulating the resulting optimization problem as a
network flow model with a separable set of concave
objective function summands based on derived
queueing-theoretic formulas. This problem is then
solved in a very efficient manner via a fixed-point
iteration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2001:PAA,
author = "Yingdong Lu and Jing-Sheng Song and Weian Zheng",
title = "Performance analysis of assemble-to-order systems
through strong approximations",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "45--46",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2001:OSQ,
author = "Mark S. Squillante and Cathy H. Xia and Li Zhang",
title = "Optimal scheduling in queueing network models of
high-volume commercial {Web} sites",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "47--48",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The optimal control of performance measures in
high-volume commercial web sites requires a fundamental
understanding of the interactions between the diverse
set of Internet services that support customer needs
and the different importance levels of these services
to both the customer and the e-commerce merchant. We
present a study of the server control policy in a
multiclass queueing network that maximizes a particular
function of profit, or minimizes a particular function
of cost, across the different classes of Internet
services.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sevcik:2002:SPC,
author = "Kenneth C. Sevcik and Hai Wang",
title = "Solution properties and convergence of an approximate
mean value analysis algorithm",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "3--10",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512842",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the solution properties and convergence
results of an approximate Mean Value Analysis (MVA)
algorithm, the Queue Line (QL) algorithm, for solving
separable queueing networks. We formally prove that the
QL algorithm is always more accurate than, and yet has
the same computational complexity as, the
Bard-Schweitzer Proportional Estimation algorithm, the
most popular approximate MVA algorithm for solving this
type of queueing network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williamson:2002:CCA,
author = "Carey Williamson and Qian Wu",
title = "A case for context-aware {TCP\slash IP}",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "11--23",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses the design and evaluation of
CATNIP, a Context-Aware Transport/Network Internet
Protocol for the Web. This integrated protocol uses
application-layer knowledge (i.e., Web document size)
to provide explicit context information to the TCP and
IP protocols. While this approach violates the
traditional layered Internet protocol architecture, it
enables informed decision-making, both at network
endpoints and at network routers, regarding flow
control, congestion control, and packet discard
decisions. We evaluate the performance of the
context-aware TCP/IP approach first using ns-2 network
simulation, and then using WAN emulation to test a
prototype implementation of CATNIP in the Linux kernel
of an Apache Web server. The advantages of the CATNIP
approach are particularly evident in a congested
Internet with 1-10\% packet loss. Simulation results
indicate a 10-20\% reduction in TCP packet loss using
simple endpoint control mechanisms, with no adverse
impact on Web page retrieval times. More importantly,
using CATNIP context information at IP routers can
reduce mean Web page retrieval times by 20-80\%, and
the standard deviation by 60-90\%. The CATNIP algorithm
can also interoperate with Random Early Detection (RED)
for active queue management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet protocols; network emulation; network
simulation; TCP/IP; web performance",
}
@Article{Menasce:2002:SAM,
author = "Daniel A. Menasc{\'e}",
title = "Simple analytic modeling of software contention",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "24--30",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512844",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Being able to model contention for software resources
(e.g., a critical section or database lock) is
paramount to building performance models that capture
all aspects of the delay encountered by a process as it
executes. Several methods have been offered for dealing
with software contention and with message blocking in
client-server systems. We present in this paper a
simple, straightforward, easy to understand and
implement approach to modeling software contention
using queuing networks. The approach consists of a
two-level iterative process. Two queuing networks are
considered: one represents software resources and the
other hardware resources. Multiclass models are allowed
and both open and closed queuing networks can be used
at the software layer. Any solution technique---exact
or approximate---can be used at any of the levels. This
technique falls within the general class of fixed-point
approximate models and is similar in spirit to other
approaches. The main difference lies in its
simplicity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cheng:2002:PSB,
author = "William C. Cheng and Cheng-Fu Chou and Leana Golubchik
and Samir Khuller",
title = "A performance study of {Bistro}, a scalable upload
architecture",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "31--39",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Hot spots are a major obstacle to achieving
scalability in the Internet. We have observed that the
existence of hot spots in {\em upload\/} applications
(examples of which include submission of income tax forms
and conference paper submission) is largely due to
approaching deadlines. The hot spot is exacerbated by
the long transfer times. To address this problem, we
proposed {\em Bistro}, a framework for building
scalable wide-area upload applications, where we employ
intermediaries, termed {\em bistros}, for improving the
efficiency and scalability of uploads. Consequently,
appropriate assignment of clients to {\em bistros\/}
has a significant effect on the performance of upload
applications and thus constitutes an important research
problem. Therefore, in this paper we focus on the
problem of assigning clients to {\em bistros\/} and
present a performance study which demonstrates the
potential performance gains of the {\em Bistro\/}
framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lawson:2002:MQB,
author = "Barry G. Lawson and Evgenia Smirni",
title = "Multiple-queue backfilling scheduling with priorities
and reservations for parallel systems",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "40--47",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe a new, non-FCFS policy to schedule
parallel jobs on systems that may be part of a
computational grid. Our algorithm continuously monitors
the system (i.e., intensity of incoming jobs and
variability of their resource demands) and adapts
its scheduling parameters to sudden workload
fluctuations. The proposed policy is based on
backfilling which permits job rearrangement in the
waiting queue. By exploiting otherwise idle processors,
this rearrangement reduces fragmentation of system
resources, thereby providing higher system utilization.
We propose to maintain multiple job queues that
effectively separate jobs according to their projected
execution time. Our policy supports different job
priority classes as well as job reservations, making it
appropriate for scheduling jobs on parallel systems
that are part of a computational grid. Detailed
performance comparisons via simulation using traces
from the Parallel Workload Archive indicate that the
proposed policy consistently outperforms traditional
scheduling approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "backfilling schedulers; batch schedulers;
computational grids; parallel systems; performance
analysis",
}
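Backfilling in the sense used above can be illustrated with a much simplified, single-queue sketch (not the multiple-queue, priority- and reservation-aware policy of the paper): the head job holds a reservation at the earliest time enough processors become free, and later jobs may start immediately only if they fit in the currently idle processors and finish before that reservation. The job fields and numbers below are illustrative assumptions.

def backfill_pass(now, total_procs, running, queue):
    """One scheduling pass.  running: list of (finish_time, procs) of executing
    jobs; queue: FCFS list of dicts with 'procs' and 'runtime' (user estimate).
    Returns the queued jobs that may start right now."""
    idle = total_procs - sum(p for _, p in running)
    if not queue:
        return []
    started, head = [], queue[0]
    if head["procs"] <= idle:
        started.append(head)
        idle -= head["procs"]
        shadow = float("inf")                 # head started, no reservation needed
    else:
        # Shadow time: earliest instant enough processors free up for the head
        # job, which therefore holds a reservation at that time.
        free, shadow = idle, float("inf")
        for finish, procs in sorted(running):
            free += procs
            if free >= head["procs"]:
                shadow = finish
                break
    for job in queue[1:]:
        if job["procs"] <= idle and now + job["runtime"] <= shadow:
            started.append(job)               # backfill without delaying the head
            idle -= job["procs"]
    return started

running = [(30.0, 40)]                        # one job holding 40 of 64 processors
queue = [{"procs": 32, "runtime": 40.0},      # head job: must wait until t = 30
         {"procs": 8,  "runtime": 20.0},      # fits now and finishes before t = 30
         {"procs": 8,  "runtime": 60.0}]      # would run past the reservation
print([j["procs"] for j in backfill_pass(0.0, 64, running, queue)])   # -> [8]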
@Article{Pasztor:2002:PBP,
author = "Attila P{\'a}sztor and Darryl Veitch",
title = "{PC} based precision timing without {GPS}",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "1--10",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511336",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A highly accurate monitoring solution for active
network measurement is provided without the need for
GPS, based on an alternative software clock for PC's
running Unix. With respect to clock {\em rate}, its
performance exceeds common GPS and NTP synchronized
software clock accuracy. It is based on the TSC
register counting CPU cycles and offers a resolution of
around 1ns, a rate stability of 0.1PPM equal to that of
the underlying hardware, and a processing overhead well
under 1$ \mu $ s per timestamp. It is scalable and can
be run in parallel with the usual clock. It is argued
that accurate rate, and not synchronised offset, is the
key requirement of a clock for network measurement. The
clock requires an accurate estimation of the CPU cycle
period. Two calibration methods which do not require a
reference clock at the calibration point are given. To
the TSC clock we add timestamping optimisations to
create two high accuracy monitors, one based on Linux
and the other on Real-Time Linux. The TSC-RT-Linux
monitor has offset fluctuations of the order of 1$ \mu
$ s. The clock is ideally suited for high precision
active measurement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "GPS; network measurement; NTP; PC clocks; software
clock; synchronization; timing",
}
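The core idea above, a software clock built from a raw cycle counter whose period is calibrated rather than synchronised, can be sketched in a few lines. This is only an illustration: time.perf_counter_ns() stands in for the TSC register, time.time() for an external reference, and the calibration interval is arbitrary; the paper's calibration methods need no reference clock at the calibration point.

import time

class CounterClock:
    """Timestamps derived from a raw counter plus a calibrated tick period."""
    def __init__(self, period_s, origin_count, origin_time_s):
        self.period_s = period_s            # estimated seconds per counter tick
        self.origin_count = origin_count    # counter value at the reference instant
        self.origin_time_s = origin_time_s  # reference wall-clock time

    def now(self, count):
        return self.origin_time_s + (count - self.origin_count) * self.period_s

def calibrate(read_counter, read_reference, interval_s=2.0):
    """Estimate the tick period from two reference readings spaced apart;
    a longer interval gives a smaller relative rate error."""
    c0, t0 = read_counter(), read_reference()
    time.sleep(interval_s)
    c1, t1 = read_counter(), read_reference()
    return CounterClock((t1 - t0) / (c1 - c0), c1, t1)

clk = calibrate(time.perf_counter_ns, time.time)
print("timestamp:", clk.now(time.perf_counter_ns()))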
@Article{Coates:2002:MLN,
author = "Mark Coates and Rui Castro and Robert Nowak and Manik
Gadhiok and Ryan King and Yolanda Tsang",
title = "Maximum likelihood network topology identification
from edge-based unicast measurements",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "11--20",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511337",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network tomography is a process for inferring
`internal' link-level delay and loss performance
information based on end-to-end (edge) network
measurements. These methods require knowledge of the
network topology; therefore a first crucial step in the
tomography process is topology identification. This
paper considers the problem of discovering network
topology solely from host-based, unicast measurements,
without internal network cooperation. First, we
introduce a novel delay-based measurement scheme that
does not require clock synchronization, making it more
practical than previous proposals. In contrast to
methods that rely on network cooperation, our
methodology has the potential to identify layer two
elements (provided they are logical topology branching
points and induce some measurable delay). Second, we
propose a maximum penalized likelihood criterion for
topology identification. This is a global optimality
criterion, in contrast to other recent proposals for
topology identification that employ suboptimal,
pair-merging strategies. We develop a novel Markov
Chain Monte Carlo (MCMC) procedure for rapid
determination of the most likely topologies. The
performance of our new probing scheme and
identification algorithm is explored through simulation
and Internet experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bu:2002:NTG,
author = "Tian Bu and Nick Duffield and Francesco {Lo Presti}
and Don Towsley",
title = "Network tomography on general topologies",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "21--30",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511338",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we consider the problem of inferring
link-level loss rates from end-to-end multicast
measurements taken from a collection of trees. We give
conditions under which loss rates are identifiable on a
specified set of links. Two algorithms are presented to
perform the link-level inferences for those links on
which losses can be identified. One, the {\em minimum
variance weighted average (MVWA) algorithm\/} treats
the trees separately and then averages the results. The
second, based on {\em expectation-maximization (EM)\/}
merges all of the measurements into one computation.
Simulations show that EM is slightly more accurate than
MVWA, most likely due to its more efficient use of the
measurements. We also describe extensions to the
inference of link-level delay, inference from
end-to-end unicast measurements, and inference when
some measurements are missing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
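The MVWA step can be made concrete in a few lines: per-tree estimates of the same link's loss rate are combined with weights proportional to the inverse of their estimated variances, which minimises the variance of the combined estimator. The numbers below are invented for illustration.

def mvwa(estimates, variances):
    """Combine per-tree estimates with inverse-variance weights."""
    weights = [1.0 / v for v in variances]
    return sum(w * e for w, e in zip(weights, estimates)) / sum(weights)

# Three trees observed the same link's loss rate with different precision.
print(mvwa([0.020, 0.035, 0.025], [1e-4, 4e-4, 2e-4]))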
@Article{Jiang:2002:LEL,
author = "Song Jiang and Xiaodong Zhang",
title = "{LIRS}: an efficient low inter-reference recency set
replacement policy to improve buffer cache
performance",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "31--42",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511340",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although LRU replacement policy has been commonly used
in the buffer cache management, it is well known for
its inability to cope with access patterns with weak
locality. Previous work, such as LRU-K and 2Q, attempts
to enhance LRU capacity by making use of additional
history information of previous block references other
than only the recency information used in LRU. These
algorithms greatly increase complexity and/or cannot
consistently provide performance improvement. Many
recently proposed policies, such as UBM and SEQ,
improve replacement performance by exploiting access
regularities in references. They only address LRU
problems on certain specific and well-defined cases
such as access patterns like sequences and loops.
Motivated by the limits of previous studies, we propose
an efficient buffer cache replacement policy, called
{\em Low Inter-reference Recency Set\/} (LIRS). LIRS
effectively addresses the limits of LRU by using
recency to evaluate Inter-Reference Recency (IRR) for
making a replacement decision. This is in contrast to
what LRU does: directly using recency to predict next
reference timing. At the same time, LIRS almost retains
the same simple assumption of LRU to predict future
access behavior of blocks. Our objectives are to
effectively address the limits of LRU for a general
purpose, to retain the low overhead merit of LRU, and
to outperform those replacement policies relying on the
access regularity detections. Conducting simulations
with a variety of traces and a wide range of cache
sizes, we show that LIRS significantly outperforms LRU,
and outperforms other existing replacement algorithms
in most cases. Furthermore, we show that the additional
cost for implementing LIRS is trivial in comparison
with LRU.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
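The distinction between recency and inter-reference recency (IRR) that LIRS relies on can be illustrated with a toy trace analyser; the actual policy maintains these quantities incrementally with a stack rather than rescanning the trace, and the block names below are arbitrary.

def recency_and_irr(trace):
    """Return, per block, (recency, IRR): the number of distinct other blocks
    referenced since its last reference, and between its last two references."""
    last_pos, prev_pos = {}, {}
    for i, block in enumerate(trace):
        prev_pos[block] = last_pos.get(block)
        last_pos[block] = i
    stats = {}
    for block, last in last_pos.items():
        recency = len(set(trace[last + 1:]) - {block})
        prev = prev_pos[block]
        irr = float("inf") if prev is None else len(set(trace[prev + 1:last]) - {block})
        stats[block] = (recency, irr)
    return stats

print(recency_and_irr(list("ABCABDDA")))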
@Article{Squillante:2002:MAD,
author = "Mark S. Squillante and Yanyong Zhang and Anand
Sivasubramaniam and Natarajan Gautam and Hubertus
Franke and Jose Moreira",
title = "Modeling and analysis of dynamic coscheduling in
parallel and distributed environments",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "43--54",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511341",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling in large-scale parallel systems has been
and continues to be an important and challenging
research problem. Several key factors, including the
increasing use of off-the-shelf clusters of
workstations to build such parallel systems, have
resulted in the emergence of a new class of scheduling
strategies, broadly referred to as dynamic
coscheduling. Unfortunately, the size of both the
design and performance spaces of these emerging
scheduling strategies is quite large, due in part to
the numerous dynamic interactions among the different
components of the parallel computing environment as
well as the wide range of applications and systems that
can comprise the parallel environment. This in turn
makes it difficult to fully explore the benefits and
limitations of the various proposed dynamic
coscheduling approaches for large-scale systems solely
with the use of simulation and/or experimentation. To
gain a better understanding of the fundamental
properties of different dynamic coscheduling methods,
we formulate a general mathematical model of this class
of scheduling strategies within a unified framework
that allows us to investigate a wide range of parallel
environments. We derive a matrix-analytic analysis
based on a stochastic decomposition and a fixed-point
iteration. A large number of numerical experiments are
performed in part to examine the accuracy of our
approach. These numerical results are in excellent
agreement with detailed simulation results. Our
mathematical model and analysis is then used to explore
several fundamental design and performance tradeoffs
associated with the class of dynamic coscheduling
policies across a broad spectrum of parallel computing
environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2002:AMS,
author = "Eitan Bachmat and Jiri Schindler",
title = "Analysis of methods for scheduling low priority disk
drive tasks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "55--65",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511342",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper analyzes various algorithms for scheduling
low priority disk drive tasks. The derived closed form
solution is applicable to a class of greedy algorithms
that includes a variety of background disk scanning
applications. By paying close attention to many
characteristics of modern disk drives, the analytical
solutions achieve very high accuracy---the difference
between the predicted response times and the
measurements on two different disks is only 3\% for all
but one examined workload. This paper also proves a
theorem which shows that background tasks implemented
by greedy algorithms can be accomplished with very
little seek penalty. Using a greedy algorithm gives a
10\% shorter response time for the foreground
application requests and up to a 20\% decrease in total
background task run time compared to results from
previously published techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Snavely:2002:SJP,
author = "Allan Snavely and Dean M. Tullsen and Geoff Voelker",
title = "Symbiotic jobscheduling with priorities for a
simultaneous multithreading processor",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "66--76",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511343",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Simultaneous Multithreading machines benefit from
jobscheduling software that monitors how well
coscheduled jobs share CPU resources, and coschedules
jobs that interact well to make more efficient use of
those resources. As a result, informed coscheduling can
yield significant performance gains over naive
schedulers. However, prior work on coscheduling focused
on equal-priority job mixes, which is an unrealistic
assumption for modern operating systems. This paper
demonstrates that a scheduler for an SMT machine can
both satisfy process priorities and symbiotically
schedule low and high priority threads to increase
system throughput. Naive priority schedulers dedicate
the machine to high priority jobs to meet priority
goals, and as a result decrease opportunities for
increased performance from multithreading and
coscheduling. More informed schedulers, however, can
dynamically monitor the progress and resource
utilization of jobs on the machine, and dynamically
adjust the degree of multithreading to improve
performance while still meeting priority goals. Using
detailed simulation of an SMT architecture, we
introduce and evaluate a series of five software and
hardware-assisted priority schedulers. Overall, our
results indicate that coscheduling priority jobs can
significantly increase system throughput by as much as
40\%, and that (1) the benefit depends upon the
relative priority of the coscheduled jobs, and (2) more
sophisticated schedulers are more effective when the
differences in priorities are greatest. We show that
our priority schedulers can decrease average turnaround
times for a random job mix by as much as 33\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "job scheduling; priorities; simultaneous
multithreading",
}
@Article{Harrison:2002:PTD,
author = "Peter G. Harrison and William J. Knottenbelt",
title = "Passage time distributions in large {Markov} chains",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "77--85",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511345",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Probability distributions of response times are
important in the design and analysis of transaction
processing systems and computer-communication systems.
We present a general technique for deriving such
distributions from high-level modelling formalisms
whose state spaces can be mapped onto finite Markov
chains. We use a load-balanced, distributed
implementation to find the Laplace transform of the
first passage time density and its derivatives at
arbitrary values of the transform parameter $s$.
Setting $ s = 0$ yields moments while the full passage
time distribution is obtained using a novel distributed
Laplace transform inverter based on the Laguerre
method. We validate our method against a variety of
simple densities, cycle time densities in certain
overtake-free (tree-like) queueing networks and a
simulated Petri net model. Our implementation is
thereby rigorously validated and has already been
applied to substantial Markov chains with over 1
million states. Corresponding theoretical results for
semi-Markov chains are also presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
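As a much smaller-scale illustration of the passage-time quantities discussed above (not the paper's distributed Laplace-transform method), mean first passage times into a target state of a continuous-time Markov chain solve a linear system in the generator restricted to the non-target states. The generator below is invented.

import numpy as np

# Generator of a 3-state CTMC (rows sum to zero); we ask for the mean time to
# first reach state 2 from each of the other states.
Q = np.array([[-3.0,  2.0,  1.0],
              [ 1.0, -2.0,  1.0],
              [ 0.5,  0.5, -1.0]])
non_target = [0, 1]
A = Q[np.ix_(non_target, non_target)]
# Mean passage times m satisfy A @ m = -1 (with m = 0 on the target state).
m = np.linalg.solve(A, -np.ones(len(non_target)))
print(dict(zip(non_target, m)))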
@Article{Riska:2002:EAS,
author = "Alma Riska and Evgenia Smirni",
title = "Exact aggregate solutions for {M/G/1}-type {Markov}
processes",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "86--96",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511346",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a new methodology for the exact analysis
of M/G/1-type Markov processes. The methodology uses
basic, well-known results for Markov chains by
exploiting the structure of the repetitive portion of
the chain and recasting the overall problem into the
computation of the solution of a finite linear system.
The methodology allows for the calculation of the
aggregate probability of a finite set of classes of
states from the state space, appropriately defined.
Further, it allows for the computation of a set of
measures of interest such as the system queue length or
any of its higher moments. The proposed methodology is
exact. Detailed experiments illustrate that the
methodology is also numerically stable, and in many
cases can yield significantly less expensive solutions
when compared with other methods, as shown by detailed
time and space complexity analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "aggregation; M/G/1-type processes; Markov chains;
matrix analytic method",
}
@Article{Jin:2002:SMD,
author = "Shudong Jin and Azer Bestavros",
title = "Scalability of multicast delivery for non-sequential
streaming access",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "97--107",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511347",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To serve asynchronous requests using multicast, two
categories of techniques---stream merging and periodic
broadcasting---have been proposed. For sequential
streaming access, where requests are uninterrupted from
the beginning to the end of an object, these techniques
are highly scalable: the required server bandwidth for
stream merging grows {\em logarithmically\/} as request
arrival rate, and the required server bandwidth for
periodic broadcasting varies {\em logarithmically\/} as
the inverse of start-up delay. A sequential access
model, however, is inappropriate to model partial
requests and client interactivity observed in various
streaming access workloads. This paper analytically and
experimentally studies the scalability of multicast
delivery under a non-sequential access model where
requests start at random points in the object. We show
that the required server bandwidth for any protocol
providing immediate service grows at least as the {\em
square root\/} of request arrival rate, and the
required server bandwidth for any protocol providing
delayed service grows {\em linearly\/} with the inverse
of start-up delay. We also investigate the impact of
limited client receiving bandwidth on scalability. We
optimize practical protocols which provide immediate
service to non-sequential requests. The protocols
utilize limited client receiving bandwidth, and they
are near-optimal in that the required server bandwidth
is very close to its lower bound.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mauer:2002:FST,
author = "Carl J. Mauer and Mark D. Hill and David A. Wood",
title = "Full-system timing-first simulation",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "108--116",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511349",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer system designers often evaluate future design
alternatives with detailed simulators that strive for
{\em functional fidelity\/} (to execute relevant
workloads) and {\em performance fidelity\/} (to rank
design alternatives). Trends toward multi-threaded
architectures, more complex micro-architectures, and
richer workloads, make authoring detailed simulators
increasingly difficult. To manage simulator complexity,
this paper advocates decoupled simulator organizations
that separate functional and performance concerns.
Furthermore, we define an approach, called {\em
timing-first simulation}, that uses an augmented timing
simulator to execute instructions important to
performance in conjunction with a functional simulator
to insure correctness. This design simplifies software
development, leverages existing simulators, and can
model micro-architecture timing in detail. We describe
the timing-first organization and our experiences
implementing TFsim, a full-system multiprocessor
performance simulator. TFsim models a pipelined,
out-of-order micro-architecture in detail, was
developed in less than one person-year, and performs
competitively with previously-published simulators.
TFsim's timing simulator implements dynamically common
instructions (99.99\% of them), while avoiding the vast
and exacting implementation efforts necessary to run
unmodified commercial operating systems and workloads.
Virtutech Simics, a full-system functional simulator,
checks and corrects the timing simulator's execution,
contributing 18-36\% to the overall run-time. TFsim's
mostly correct functional implementation introduces a
worst-case performance error of 4.8\% for our
commercial workloads. Some additional simulator
performance is gained by verifying functional
correctness less often, at the cost of some additional
performance error.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jin:2002:PPR,
author = "Ruoming Jin and Gagan Agrawal",
title = "Performance prediction for random write reductions: a
case study in modeling shared memory programs",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "117--128",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511350",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we revisit the problem of performance
prediction on shared memory parallel machines,
motivated by the need for selecting parallelization
strategy for {\em random write reductions.\/} Such
reductions frequently arise in data mining algorithms.
In our previous work, we have developed a number of
techniques for parallelizing this class of reductions.
Our previous work has shown that each of the three
techniques, {\em full replication, optimized full
locking}, and {\em cache-sensitive}, can outperform
others depending upon problem, dataset, and machine
parameters. Therefore, an important question is, {\em
`Can we predict the performance of these techniques for
a given problem, dataset, and machine?'.\/} This paper
addresses this question by developing an analytical
performance model that captures a two-level cache,
coherence cache misses, TLB misses, locking overheads,
and contention for memory. The analytical model is combined
with results from micro-benchmarking to predict
performance on real machines. We have validated our
model on two different SMP machines. Our results show
that our model effectively captures the impact of
memory hierarchy (two-level cache and TLB) as well as
the factors that limit parallelism (contention for
locks, memory contention, and coherence cache misses).
The difference between predicted and measured
performance is within 20\% in almost all cases.
Moreover, the model is quite accurate in predicting the
relative performance of the three parallelization
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kandiraju:2002:CTB,
author = "Gokul B. Kandiraju and Anand Sivasubramaniam",
title = "Characterizing the $d$-{TLB} behavior of {SPEC
CPU2000} benchmarks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "129--139",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511351",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite the numerous optimization and evaluation
studies that have been conducted with TLBs over the
years, there is still a deficiency in an in-depth
understanding of TLB characteristics from an
application angle. This paper presents a detailed
characterization study of the TLB behavior of the SPEC
CPU2000 benchmark suite. The contributions of this work
are in identifying important application
characteristics for TLB studies, quantifying the
SPEC2000 application behavior for these
characteristics, as well as making pronouncements and
suggestions for future research based on these results.
Around one-fourth of the SPEC2000 applications (ammp,
apsi, galgel, lucas, mcf, twolf and vpr) have
significant TLB miss rates. Both capacity and
associativity are influencing factors on miss rates,
though they do not necessarily go hand-in-hand.
Multi-level TLBs are definitely useful for these
applications in cutting down access times without
significant miss rate degradation. Superpaging to
combine TLB entries may not be rewarding for many of
these applications. Software management of TLBs in
terms of determining what entries to prefetch, what
entries to replace, and what entries to pin has a lot
of potential to cut down miss rates considerably.
Specifically, the potential benefits of prefetching TLB
entries are examined, and Distance Prefetching is shown
to give good prediction accuracy for these
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
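A toy set-associative TLB simulator of the kind such characterization studies build on is sketched below; the capacity, associativity, page size, and synthetic reference stream are illustrative assumptions, not the paper's configuration.

from collections import OrderedDict

def tlb_miss_rate(addresses, entries=64, assoc=4, page_size=4096):
    """Set-associative TLB with LRU replacement within each set."""
    sets = entries // assoc
    tlb = [OrderedDict() for _ in range(sets)]
    misses = 0
    for addr in addresses:
        vpn = addr // page_size
        s = tlb[vpn % sets]
        if vpn in s:
            s.move_to_end(vpn)            # refresh LRU position
        else:
            misses += 1
            if len(s) >= assoc:
                s.popitem(last=False)     # evict the least recently used entry
            s[vpn] = True
    return misses / len(addresses)

# A strided walk over more pages than the TLB can hold misses heavily.
trace = [i * 2 * 4096 for i in range(200)] * 10
print(tlb_miss_rate(trace))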
@Article{Hertz:2002:EFG,
author = "Matthew Hertz and Stephen M. Blackburn and J. Eliot B.
Moss and Kathryn S. McKinley and Darko Stefanovi{\'c}",
title = "Error-free garbage collection traces: how to cheat and
not get caught",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "140--151",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511352",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Programmers are writing a large and rapidly growing
number of programs in object-oriented languages such as
Java that require garbage collection (GC). To explore
the design and evaluation of GC algorithms quickly,
researchers are using simulation based on traces of
object allocation and lifetime behavior. The {\em brute
force\/} method generates perfect traces using a
whole-heap GC at every potential GC point in the
program. Because this process is prohibitively
expensive, researchers often use {\em granulated\/}
traces by collecting only periodically, e.g., every 32K
bytes of allocation. We extend the state of the art for
simulating GC algorithms in two ways. First, we present
a systematic methodology and results on the effects of
trace granularity for a variety of copying GC
algorithms. We show that trace granularity often
distorts GC performance results compared with perfect
traces, and that some GC algorithms are more sensitive
to this effect than others. Second, we introduce and
measure the performance of a new precise algorithm for
generating GC traces which is over 800 times faster
than the brute force method. Our algorithm, called
Merlin, frequently timestamps objects and later uses
the timestamps of dead objects to reconstruct precisely
when they died. It performs only periodic garbage
collections and achieves high accuracy at low cost,
eliminating any reason to use granulated traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cameron:2002:HDM,
author = "Craig W. Cameron and Steven H. Low and David X. Wei",
title = "High-density model for server allocation and
placement",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "152--159",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511354",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is well known that optimal server placement is
NP-hard. We present an approximate model for the case
when both clients and servers are dense, and propose a
simple server allocation and placement algorithm based
on high-rate vector quantization theory. The key idea
is to regard the location of a request as a random
variable with probability density that is proportional
to the demand at that location, and the problem of
server placement as source coding, i.e., to optimally
map a source value (request location) to a code-word
(server location) to minimize distortion (network
cost). This view has led to a joint server allocation
and placement algorithm that has a time-complexity that
is linear in the number of clients. Simulations are
presented to illustrate its performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "content distribution; high density; server placement
and allocation",
}
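The source-coding view of placement described above maps naturally onto Lloyd's algorithm: treat client locations weighted by demand as the source distribution and iterate assignment and centroid steps to choose server locations. The sketch below is a plain weighted k-means under that interpretation, with made-up coordinates and demands, not the paper's high-rate quantization analysis.

import random

def weighted_kmeans(points, weights, k, iters=50):
    centers = random.sample(points, k)
    for _ in range(iters):
        clusters = [[] for _ in range(k)]
        # Assignment step: each client request goes to its nearest server.
        for (x, y), w in zip(points, weights):
            j = min(range(k),
                    key=lambda c: (x - centers[c][0]) ** 2 + (y - centers[c][1]) ** 2)
            clusters[j].append((x, y, w))
        # Update step: move each server to the demand-weighted centroid.
        for j, cl in enumerate(clusters):
            if cl:
                tw = sum(w for _, _, w in cl)
                centers[j] = (sum(x * w for x, _, w in cl) / tw,
                              sum(y * w for _, y, w in cl) / tw)
    return centers

clients = [(random.random(), random.random()) for _ in range(500)]
demand = [random.random() for _ in clients]
print(weighted_kmeans(clients, demand, k=4))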
@Article{Olshefski:2002:ICR,
author = "David P. Olshefski and Jason Nieh and Dakshi Agrawal",
title = "Inferring client response time at the {Web} server",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "160--171",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511355",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As businesses continue to grow their World Wide Web
presence, it is becoming increasingly vital for them to
have quantitative measures of the client perceived
response times of their web services. We present Certes
(CliEnt Response Time Estimated by the Server), an
online server-based mechanism for web servers to
measure client perceived response time, as if measured
at the client. Certes is based on a model of TCP that
quantifies the effect that connection drops have on
perceived client response time, by using three simple
server-side measurements: connection drop rate,
connection accept rate and connection completion rate.
The mechanism does not require modifications to http
servers or web pages, does not rely on probing or third
party sampling, and does not require client-side
modifications or scripting. Certes can be used to
measure response times for any web content, not just
HTML. We have implemented Certes and compared its
response time measurements with those obtained with
detailed client instrumentation. Our results
demonstrate that Certes provides accurate server-based
measurements of client response times in HTTP 1.0/1.1
[14] environments, even with rapidly changing
workloads. Certes runs online in constant time with
very low overhead. It can be used at web sites and
server farms to verify compliance with service level
objectives.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "client perceived response time; web server",
}
@Article{Lee:2002:ACD,
author = "Sam C. M. Lee and John C. S. Lui and David K. Y. Yau",
title = "Admission control and dynamic adaptation for a
proportional-delay diffserv-enabled {Web} server",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "172--182",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511356",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a web server that can provide
differentiated services to clients with different QoS
requirements. The web server can provide $ N > 1 $
classes of service. Rather than using a strict priority
policy, which may lead to request starvation, the web
server provides a proportional-delay differentiated
service (PDDS) to heterogeneous clients. An operator
for the web server can specify `fixed' performance
spacings between classes, namely, $ r_{i, i + 1} > 1 $,
for $ i = 1, \ldots {}, N - 1 $. Requests in class $ i
+ 1 $ are guaranteed to have an average waiting time
which is $ 1 / r_{i, i + 1} $ of the average waiting
time of class $i$ requests. With PDDS, we can provide
consistent performance spacings over a wide range of
system loadings. In addition, each client can specify a
maximum average waiting time requirement to be
guaranteed by the web server. We propose two efficient
admission control algorithms so that a web server can
provide the QoS guarantees and, at the same time,
classify each client to its `lowest' admissible class,
resulting in lowest usage cost for the client. We also
consider how to perform end-point dynamic adaptation
such that clients can submit requests at lower class
and further reduce their usage cost, without violating
their QoS requirements. We propose two dynamic
adaptation algorithms: one is server-based and the
other is client-based. The client-based adaptation is
based on a non-cooperative game technique. We report
diverse experimental results to illustrate the
effectiveness of these algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2002:QSE,
author = "Haonan Tan and Derek L. Eager and Mary K. Vernon and
Hongfei Guo",
title = "Quality of service evaluations of multicast streaming
protocols",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "183--194",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511358",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently proposed scalable on-demand streaming
protocols have previously been evaluated using a system
cost measure termed the `required server bandwidth'.
For the scalable protocols that provide immediate
service to each client when the server is not
overloaded, this paper develops simple analytic models
to evaluate two client-oriented quality of service
metrics, namely (1) the mean client waiting time in
systems where clients are willing to wait if a
(well-provisioned) server is temporarily overloaded,
and (2) the fraction of clients who balk (i.e., leave
without receiving their requested media content) in
systems where the clients will tolerate no or only very
low service delays during a temporary overload. The
models include novel approximate MVA techniques that
appear to extend the range of applicability of
customized AMVA to include questions focussed on state
probabilities rather than on mean values, and to
systems in which the operating points of interest do
not include substantial client queues. For example, the
new AMVA models accurately estimate the server
bandwidth needed to achieve a balking rate as low as
one in ten thousand. The analytic models can easily be
applied to determine the server bandwidth needed for a
given number of media files, anticipated total client
request rate and file access frequencies, and target
balking rate or mean wait. Results show that (a)
scalable media servers that are configured with the
`required server bandwidth' defined in previous work
have low mean wait but may have unacceptably high
client balking rates (i.e., greater than one in
twenty), (b) for high to moderate client load, only a
10--50\% increase in the previously defined required
server bandwidth is needed to achieve a very low
balking rate (e.g., one in ten thousand), and (c) media
server performance (either mean wait or balking rate)
degrades rapidly if the actual client load is more than
10\% greater than the anticipated load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balachandran:2002:CUB,
author = "Anand Balachandran and Geoffrey M. Voelker and
Paramvir Bahl and P. Venkat Rangan",
title = "Characterizing user behavior and network performance
in a public wireless {LAN}",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "195--205",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511359",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents and analyzes user behavior and
network performance in a public-area wireless network
using a workload captured at a well-attended ACM
conference. The goals of our study are: (1) to extend
our understanding of wireless user behavior and
wireless network performance; (2) to characterize
wireless users in terms of a parameterized model for
use with analytic and simulation studies involving
wireless LAN traffic; and (3) to apply our workload
analysis results to issues in wireless network
deployment, such as capacity planning, and potential
network optimizations, such as algorithms for load
balancing across multiple access points (APs) in a
wireless network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singh:2002:ECT,
author = "Harkirat Singh and Suresh Singh",
title = "Energy consumption of {TCP Reno}, {Newreno}, and
{SACK} in multi-hop wireless networks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "206--216",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511360",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we compare the energy consumption
behavior of three versions of TCP --- Reno, Newreno,
and SACK. The experiments were performed on a wireless
testbed where we measured the energy consumed at the
sender node. Our results indicate that, in most cases,
using total energy consumed as the metric, SACK
outperforms Newreno and Reno while Newreno performs
better than Reno. The experiments emulated a large set
of network conditions including variable round trip
times, random loss, bursty loss, and packet reordering.
We also estimated the idealized energy for each of the
three implementations (i.e., we subtract out the energy
consumed when the sender is idle) and here,
surprisingly, we find that in many instances SACK
performs poorly compared to the other two
implementations. We conclude that if the mobile device
has a very low idle power consumption then SACK is not
the best implementation to use for bursty or random
loss. On the other hand, if the idle power consumption
is significant, then SACK is the best choice since it
has the lowest overall energy consumption.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy; mobile; TCP; wireless",
}
@Article{Heath:2002:ICA,
author = "Taliver Heath and Richard P. Martin and Thu D.
Nguyen",
title = "Improving cluster availability using workstation
validation",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "217--227",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511362",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We demonstrate a framework for improving the
availability of cluster based Internet services. Our
approach models Internet services as a collection of
interconnected components, each possessing well defined
interfaces and failure semantics. Such a decomposition
allows designers to engineer high availability based on
an understanding of the interconnections and isolated
fault behavior of each component, as opposed to ad-hoc
methods. In this work, we focus on using the entire
commodity workstation as a component because it
possesses natural, fault-isolated interfaces. We define
a failure event as a reboot because not only is a
workstation unavailable during a reboot, but reboots
are also symptomatic of a larger class of
failures, such as configuration and operator errors.
Our observations of 3 distinct clusters show that the
time between reboots is best modeled by a Weibull
distribution with shape parameters of less than 1,
implying that a workstation becomes more reliable the
longer it has been operating. Leveraging this observed
property, we design an allocation strategy which
withholds recently rebooted workstations from active
service, validating their stability before allowing
them to return to service. We show via simulation that
this policy leads to a 70-30 rule-of-thumb: For a
constant utilization, approximately 70\% of the
workstation failures can be masked from end clients
with 30\% extra capacity added to the cluster, provided
reboots are not strongly correlated. We also found our
technique is most sensitive to the burstiness of
reboots as opposed to absolute lengths of workstation
uptimes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
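The Weibull observation above can be checked numerically: with shape parameter k < 1 the hazard rate h(t) = (k/scale)*(t/scale)**(k-1) decreases with uptime, so recently rebooted machines are the riskiest ones to place in service. The parameters below are illustrative, not fitted to the paper's traces.

def weibull_hazard(t, shape, scale):
    """Instantaneous failure (reboot) rate of a Weibull distribution."""
    return (shape / scale) * (t / scale) ** (shape - 1)

# With shape < 1 the hazard falls as uptime grows.
for uptime in (1.0, 10.0, 100.0, 1000.0):
    print(uptime, weibull_hazard(uptime, shape=0.7, scale=200.0))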
@Article{Lai:2002:LWA,
author = "Albert Lai and Jason Nieh",
title = "Limits of wide-area thin-client computing",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "228--239",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511363",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While many application service providers have proposed
using thin-client computing to deliver computational
services over the Internet, little work has been done
to evaluate the effectiveness of thin-client computing
in a wide-area network. To assess the potential of
thin-client computing in the context of future
commodity high-bandwidth Internet access, we have used
a novel, non-invasive slow-motion benchmarking
technique to evaluate the performance of several
popular thin-client computing platforms in delivering
computational services cross-country over Internet2.
Our results show that using thin-client computing in a
wide-area network environment can deliver acceptable
performance over Internet2, even when client and server
are located thousands of miles apart on opposite ends
of the country. However, performance varies widely
among thin-client platforms and not all platforms are
suitable for this environment. While many thin-client
systems are touted as being bandwidth efficient, we
show that network latency is often the key factor in
limiting wide-area thin-client performance.
Furthermore, we show that the same techniques used to
improve bandwidth efficiency often result in worse
overall performance in wide-area networks. We
characterize and analyze the different design choices
in the various thin-client platforms and explain which
of these choices should be selected for supporting
wide-area computing services.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vetter:2002:DSP,
author = "Jeffrey Vetter",
title = "Dynamic statistical profiling of communication
activity in distributed applications",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "240--250",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511364",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance analysis of communication activity for a
terascale application with traditional message tracing
can be overwhelming in terms of overhead, perturbation,
and storage. We propose a novel alternative that
enables dynamic statistical profiling of an
application's communication activity using message
sampling. We have implemented an operational prototype,
named PHOTON, and our evidence shows that this new
approach can provide an accurate, low-overhead,
tractable alternative for performance analysis of
communication activity. PHOTON consists of two
components: a Message Passing Interface (MPI) profiling
layer that implements sampling and analysis, and a
modified MPI runtime that appends a small but necessary
amount of information to individual messages. More
importantly, this alternative enables an assortment of
runtime analysis techniques so that, in contrast to
post-mortem, trace-based techniques, the raw
performance data can be jettisoned immediately after
analysis. Our investigation shows that message sampling
can reduce overhead to imperceptible levels for many
applications. Experiments on several applications
demonstrate the viability of this approach. For
example, with one application, our technique reduced
the analysis overhead from 154\% for traditional
tracing to 6\% for statistical profiling. We also
evaluate different sampling techniques in this
framework. The coverage of the sample space provided by
purely random sampling is superior to counter- and
timer-based sampling. Also, PHOTON's design reveals
that frugal modifications to the MPI runtime system
could facilitate such techniques on production
computing systems, and it suggests that this sampling
technique could execute continuously for long-running
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
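Only the statistical-sampling step is sketched here (the PHOTON profiling layer and MPI runtime modifications are beyond a few lines): keep each message record with probability p and scale the retained counts back up. The record fields and the value of p are assumptions for illustration.

import random

def sample_message_stats(messages, p=0.01):
    """Keep each record with probability p; scale counts back by 1/p."""
    kept = [m for m in messages if random.random() < p]
    est_total = len(kept) / p
    est_mean_bytes = sum(m["bytes"] for m in kept) / len(kept) if kept else 0.0
    return est_total, est_mean_bytes

# Synthetic stream of per-message records (field names are hypothetical).
stream = [{"bytes": random.choice([64, 1024, 65536])} for _ in range(100000)]
print(sample_message_stats(stream))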
@Article{Cook:2002:TRP,
author = "Jeanine Cook and Richard L. Oliver and Eric E.
Johnson",
title = "Toward reducing processor simulation time via dynamic
reduction of microarchitecture complexity",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "252--253",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511366",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As processor microarchitectures continue to increase
in complexity, so does the time required to explore the
design space. Performing cycle-accurate, detailed
timing simulation of a realistic workload on a proposed
processor microarchitecture often incurs a
prohibitively large time cost. We propose a method to
reduce the time cost of simulation by dynamically
varying the complexity of the processor model
throughout the simulation. In this paper, we give first
evidence of the feasibility of this approach. We
demonstrate that there are significant amounts of time
during a simulation where a reduced processor model
accurately tracks important behavior of a full model,
and that by simulating the reduced model during these
times the total simulation time can be reduced.
Finally, we discuss metrics for detecting areas where
the two processor models track each other, which is
crucial for dynamically deciding when to use a reduced
rather than a full model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shih:2002:ETC,
author = "Jimmy S. Shih and Randy H. Katz",
title = "Evaluating tradeoffs of congestion pricing for voice
calls",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "254--255",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511367",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We conducted user experiments and simulations to
understand the tradeoffs of congestion pricing between
system performance and user satisfaction for a large
community of users. We found that congestion pricing
can be effective for voice calls because it only needs
to be applied occasionally and that users are
responsive to occasional price increases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sivan-Zimet:2002:WBO,
author = "Miriam Sivan-Zimet and Tara M. Madhyastha",
title = "Workload based optimization of probe-based storage",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "256--257",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511368",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance gap between microprocessors and
secondary storage is still a limitation in today's
systems. Academia and industry are developing new
technologies to overcome this gap, such as improved
read-write head technology and higher storage
densities. One promising new technology is probe-based
storage [1]. Characteristics of probe-based storage
include small size, high density, high parallelism, low
power consumption, and rectilinear motion. We have
created a probe-based storage simulation model,
configurable to different design points, and identified
its sensitivity to various parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lv:2002:SRU,
author = "Qin Lv and Pei Cao and Edith Cohen and Kai Li and
Scott Shenker",
title = "Search and replication in unstructured peer-to-peer
networks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "258--259",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511369",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Decentralized and unstructured peer-to-peer networks
such as Gnutella are attractive for certain
applications because they require no centralized
directories and no precise control over network
topology or data placement. However, the flooding-based
query algorithm used in Gnutella does not scale; each
individual query generates a large amount of traffic
and large systems quickly become overwhelmed by the
query-induced load. This paper explores various
alternatives to Gnutella's query algorithm and data
replication strategy. We propose a query algorithm
based on multiple random walks that resolves queries
almost as quickly as Gnutella's flooding method while
reducing the network traffic by two orders of magnitude
in many cases. We also present a distributed
replication strategy that yields close-to-optimal
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
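The contrast between flooding and multiple random walks can be reproduced qualitatively with a small simulation on a random graph: flooding forwards the query to every neighbour out to a TTL, while k walkers each forward a single copy per step until a replica is found. Graph size, degree, replication ratio, and walker count below are invented, not the paper's experimental setup.

import random
from collections import deque

def random_graph(n, degree):
    # Roughly regular random graph as an adjacency dict.
    adj = {v: set() for v in range(n)}
    for v in range(n):
        while len(adj[v]) < degree:
            u = random.randrange(n)
            if u != v:
                adj[v].add(u)
                adj[u].add(v)
    return adj

def flood_messages(adj, src, ttl=7):
    # Every visited node within the TTL forwards the query to all neighbours,
    # whether or not a replica has already been found.
    msgs, seen, frontier = 0, {src}, deque([(src, 0)])
    while frontier:
        v, depth = frontier.popleft()
        if depth == ttl:
            continue
        for u in adj[v]:
            msgs += 1
            if u not in seen:
                seen.add(u)
                frontier.append((u, depth + 1))
    return msgs

def walk_messages(adj, src, holders, walkers=16, max_msgs=100000):
    # k parallel random walks; stop as soon as any walker hits a replica.
    msgs, positions = 0, [src] * walkers
    while msgs < max_msgs:
        for i, v in enumerate(positions):
            nxt = random.choice(list(adj[v]))
            msgs += 1
            if nxt in holders:
                return msgs
            positions[i] = nxt
    return msgs

n = 10000
adj = random_graph(n, 4)
holders = set(random.sample(range(n), n // 100))   # object replicated on 1% of nodes
print("flooding messages:   ", flood_messages(adj, 0))
print("random-walk messages:", walk_messages(adj, 0, holders))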
@Article{Chandramouli:2002:ALT,
author = "Y. Chandramouli and Arnold Neidhardt",
title = "Application level traffic measurements for capacity
engineering",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "260--261",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511370",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In general, the traffic characteristics of the
individual applications that constitute the aggregate
traffic on a network can be important for capacity
engineering. In this paper, we demonstrate based on
mathematical analysis the value of application specific
measurements even when there is no service
differentiation. In other words, under certain
assumptions, we obtain the result that errors in
engineering can occur, and in particular,
under-engineering can occur when traffic
characteristics of individual applications are ignored.
The assumptions are that the individual applications
can be modeled adequately as Fractional Brownian
Motions and that measurements are available only at
relatively coarse time scales. The results in this
paper emphasize the value of collecting fine-grained
traffic measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williamson:2002:CAT,
author = "Carey Williamson and Qian Wu",
title = "Context-aware {TCP\slash IP}",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "262--263",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511371",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses the design and evaluation of
CATNIP, a Context-Aware Transport/Network Internet
Protocol for the Web. This integrated protocol uses
application-layer knowledge (i.e., Web document size)
to provide explicit context information to the TCP and
IP protocols. While this approach violates the
traditional layered Internet protocol architecture, it
enables informed decision-making, both at network
endpoints and at network routers, regarding flow
control, congestion control, and packet discard
decisions. The ns-2 network simulator is used to
evaluate the performance of the context-aware TCP/IP
approach, using a simple network topology and a
synthetic Web workload. Simulation results indicate a
10-20\% reduction in TCP packet loss using simple
endpoint control mechanisms. More importantly, using
CATNIP context information at IP routers can produce
20-80\% reductions in the mean Web page retrieval
times, and 60-90\% reductions in the standard deviation
of retrieval times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barakat:2002:IBT,
author = "Chadi Barakat and Patrick Thiran and Gianluca
Iannaccone and Christophe Diot",
title = "On {Internet} backbone traffic modeling",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "264--265",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511372",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The motivation of this work is to design a traffic
model that can be used in routers or by network
administrators to assist in network design and
management. Currently, network operators have very
basic information about the traffic. They mostly use
SNMP, which provides average throughput information
over 5-minute intervals. An analytical model can
provide more accurate information on the traffic such
as its variation and its auto-correlation at short
timescales. In contrast to other works (see [2] and the
references therein), we choose to model the traffic on
a link that is {\em not\/} congested (congestion
possibly appears elsewhere in the Internet). This
assumption is valid (and in fact is the rule) for
backbone links that are generally over-provisioned
(i.e., the network is designed so that a backbone link
does not reach 50\% utilization in the absence of link
failure [4]). This choice is driven by our main
objective, which is to provide a link dimensioning tool
usable in backbone network management. We opt for a
model of the traffic at the flow level. Modeling the
traffic at the packet level is very difficult, since
traffic on a link is the result of a high level of
multiplexing of numerous flows whose behavior is
strongly influenced by the transport protocol and by
the application. A flow in our model is a very generic
notion. It can be a TCP connection or a UDP stream
(described by source and destination IP addresses,
source and destination port numbers and the protocol
number), or it can be a destination address prefix
(e.g., destination IP address in the form a.b.0.0/16).
The definition of a flow is deliberately kept general,
which allows our model to be applied to different
applications and to different transport mechanisms. The
model can, however, be specialized to particular
traffic types such as FTP and HTTP. By specializing the
model to a certain traffic type, one can expect to
obtain better results. Data flows arrive at a backbone
link at random times, transport a random volume of
data, and stay active for random periods. Given
information on flows, our model aims to compute the
total (aggregate) rate of data observed on the backbone
link. We are interested in capturing the dynamics of
the total data rate at short timescales (i.e., of the
order of hundreds of milliseconds). These dynamics can
be completely characterized using simple mathematical
tools, namely the shot-noise process [3]. Our main
contribution is the computation of simple expressions
for important measures of backbone traffic such as its
average, its variance, and its auto-correlation
function. These expressions are functions of a small
number of parameters that can be easily computed by a
router (e.g., using a tool such as NetFlow, which
provides flow information in Cisco routers). Our model
can be helpful for managing and dimensioning IP
backbone networks. Knowing the average and the variance
of the traffic allows an ISP to provision the links of
its backbone so as to avoid congestion. Congestion can
be avoided at short timescales of the order of hundreds
of milliseconds. The auto-correlation function of the
traffic can be used to propose predictors for its
future values. The prediction of the traffic has
diverse applications in managing the resources of the
backbone. One interesting application is the use of a
short-term prediction to optimize packet routing and
load balancing. Our model can also be used to assess
the impact on backbone traffic of changes made in the
rest of the Internet such as the addition of a new
customer, a new application, or a new transport
mechanism. The ISP can plan the provisioning of its
backbone so as to absorb the resulting change of
traffic before this change takes place.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:2002:SND,
author = "Alexander Thomasian and Chang Liu",
title = "Some new disk scheduling policies and their
performance",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "266--267",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511373",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advances in magnetic recording technology have
resulted in a rapid increase in disk capacities, but
improvements in the mechanical characteristics of disks
have been quite modest. For example, the access time to
random disk blocks has decreased by a mere factor of
two, while disk capacities have increased by several
orders of magnitude. OLTP applications subject disks to
a very demanding workload consisting of accesses to
randomly distributed disk blocks and gain limited
benefit from caching and prefetching (at the onboard
disk cache). We propose some new disk scheduling
methods to address the limited disk access bandwidth
problem. Some well-known disk scheduling methods are:
(i) FCFS. (ii) Shortest Seek Time First (SSTF). (iii)
SCAN and Cyclical SCAN (CSCAN). The latter moves the
disk arm to its beginning point after each SCAN so that
requests at all disk cylinders are treated
symmetrically. (iv) CSCAN with a lookahead of next {\em
i\/} requests (CSCAN-LAi) takes into account latency to
reorder their processing to minimize the sum of their
service times. (v) Shortest Access Time First (SATF),
which provides the best performance [2]. (vi) SATF with
lookahead for $i$ requests (SATF-LAi). In the case of
SATF-LAi with $ i = 2 $, after the completion of request
$X$, the scheduler chooses requests $A$ and $B$ such
that the sum of their service times processed
consecutively, i.e., $ t_{X, A} + a t_{A, B}$, is
minimized. In {\em SATF with flexible lookahead\/} only
request $A$ is definitely processed and request $B$ is
processed provided that it is selected in the next
round. We refer to $a$ as the {\em discount factor\/}
($ 0 \leq a \leq 1$), because less weight is attached
to the service time of request $B$, since it may not be
processed after request $A$. The case $ a = 0$
corresponds to pure SATF. When $ a = 1$ we consider a
variant called {\em SATF with fixed lookahead\/} where
$B$ is processed unconditionally after $A$ before any
other (perhaps more favorable recent) requests. Thus
requests are processed two at a time, unless only one
request is available. More generally requests in the
temporal neighborhood of request $A$ are given higher
priority.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2002:SCC,
author = "Kang-Won Lee and Khalil Amiri and Sambit Sahu and
Chitra Venkatramani",
title = "On the sensitivity of cooperative caching performance
to workload and network characteristics",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "268--269",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511374",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A rich body of literature exists on several aspects of
cooperative caching [1, 2, 3, 4, 5], including object
placement and replacement algorithms [1], mechanisms
for reducing the overhead of cooperation [2, 3], and
the performance impact of cooperation [3, 4, 5].
However, while several studies have focused on
quantifying the performance benefit of cooperative
caching, their conclusions on the effectiveness of such
cooperation vary significantly. The source of this
apparent disagreement lies mainly in their different
assumptions about workload and network characteristics,
and about the degree of cooperation among caches. To
more comprehensively evaluate the practical benefit of
cooperative caching, we explore the sensitivity of the
benefit of cooperation to workload characteristics such
as {\em object popularity distribution, temporal
locality, one time referencing behavior}, and to
network characteristics such as {\em latencies between
clients, proxies, and servers.\/} Furthermore, we
identify a critical workload characteristic, which we
call {\em average access density}, and show that it has
a crucial impact on the effectiveness of cooperative
caching. In this extended abstract, we report on a few
important results selected from our extensive study
reported in [6]. In particular, assuming an LFU-based
cache management policy, we arrive at the following
conclusions. First, cooperative caching is only
effective when the {\em average access density\/}
(defined as the ratio of the number of requests to the
number of distinct objects in a time window) is
relatively high. Second, the effectiveness of
cooperative caching decreases as the skew in object
popularity increases. Higher skew means that only a
small number of objects are most frequently accessed,
reducing the benefit of larger caches, and therefore of
cooperation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anantharaman:2002:MAT,
author = "Vaidyanathan Anantharaman and Raghupathy Sivakumar",
title = "A microscopic analysis of {TCP} performance over
wireless ad-hoc networks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "270--271",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511375",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ad-hoc networks are multi-hop wireless networks that
can operate without the services of an established
backbone infrastructure. While such networks have
obvious applications in the military and disaster
relief environments, more recent work motivating their
use even in regular wireless packet data networks has
increased their significance. The
focus of this paper is to study the performance of the
TCP transport layer protocol over ad-hoc networks.
Recent works in transport protocols for ad-hoc networks
have investigated the impact of ad-hoc network
characteristics on TCP's performance, and proposed
schemes that help TCP overcome the negative impact of
such characteristics as random wireless loss and
mobility. The primary mechanism proposed involves
sending an explicit link failure notification (ELFN) to
the source from the point of link failure. The source,
upon receiving the ELFN, {\em freezes\/} TCP's timers
and state, re-computes a new route to the destination,
and either releases the timers and state or re-starts
them from their respective initial values. While the
goal of ELFN based approaches is to prevent the route
disruption time from adversely impacting TCP's
performance, in this paper we contend that there are
several other factors that influence TCP's performance
degradation. We briefly outline the different factors
below: $ \bullet $ {\em TCP Losses:\/} Every route
failure induces up to a TCP-window's worth of packet
losses. While the losses have an absolute impact on the
performance degradation, the TCP source also reacts to
the losses by reducing the size of its window. Note
that ELFN will prevent this negative impact on TCP's
performance by appropriately freezing TCP's state. $
\bullet $ {\em MAC Failure Detection Time:\/} Since the
MAC layer (802.11) has to go through multiple
retransmissions before concluding link failure, there
is a distinct component associated with the time taken
to actually detect a link failure after it has
occurred. Importantly, the detection time increases
with increasing load in the network. While an external
mechanism to detect link failures (e.g. through
periodic beacons at the routing layer) would solve this
problem, it comes at the cost of beacon overheads and
associated trade-offs. $ \bullet $ {\em MAC Packet
Arrival:\/} When a failure is detected as described
above, the link failure indication is sent only to the
source of the packet that triggered the detection. If
another source is using the same link in the path to
its destination, the node upstream of the link failure
will wait until it receives a packet from that source
before informing it of the link failure. This also
contributes to the magnitude of the delay after which a
source realizes that a path is broken. $ \bullet $ {\em
Route Computation Time:\/} Once a source is informed of
a path failure, the time taken to recompute the route
also increases with increasing load. With ELFN, for a
load of 25 connections, the per-flow average of the
aggregate time spent in route computation during a 100
second simulation was as high as 15 seconds. In
addition to the absolute impact of the idle periods,
TCP is also likely to experience timeouts, especially
in the heavily loaded scenarios where the route
computation time can be high. In the next section, we
present a framework of mechanisms called {\em Atra\/}
targeted toward addressing each of the above
components. We show through representative simulation
results that the proposed mechanisms outperform both
the default protocol stack and an ELFN-enabled protocol
stack substantially. We assume the default protocol
stack to consist of the IEEE 802.11 MAC protocol, the
Dynamic Source Routing (DSR) routing protocol, and
TCP-NewReno as the transport layer protocol. For a more
detailed analysis of TCP performance in mobile ad-hoc
networks, and a description of the Atra framework, please
see [1].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choi:2002:ARS,
author = "Baek-Young Choi and Jaesung Park and Zhi-Li Zhang",
title = "Adaptive random sampling for load change detection",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "272--273",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511376",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Timely detection of changes in traffic load is
critical for initiating appropriate traffic engineering
mechanisms. Accurate measurement of traffic is
essential since the efficacy of change detection
depends on the accuracy of traffic estimation. However,
{\em precise\/} traffic measurement involves inspecting
{\em every\/} packet traversing a link, resulting in
significant overhead, particularly on high speed links.
{\em Sampling\/} techniques for traffic load {\em
estimation\/} are proposed as a way to limit the
measurement overhead. In this paper, we address the
problem of {\em bounding\/} sampling error within a
pre-specified tolerance level and propose an {\em
adaptive random sampling\/} technique that determines
the {\em minimum\/} sampling probability adaptively
according to traffic dynamics. Using real network
traffic traces, we show that the proposed adaptive
random sampling technique indeed produces the desired
accuracy, while also yielding significant reduction in
the amount of traffic samples. We also investigate the
impact of sampling errors on the performance of load
change detection.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "change detection; sampling",
}
@Article{Zhao:2002:MEN,
author = "Zhili Zhao and Jayesh Ametha and Swaroop Darbha and A.
L. Narasimha Reddy",
title = "A method for estimating non-responsive traffic at a
router",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "274--275",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511377",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we propose a scheme for estimating the
proportion of the incoming traffic that is not
responding to congestion at a router. The idea of the
proposed scheme is that if the observed queue length
and packet drop probability do not match the
predicted results from the TCP model, then the error
must come from the non-responsive traffic; the error can
then be used to estimate the non-responsive traffic. The
proposed scheme utilizes queue length history, packet
drop history, expected TCP and queue dynamics to
estimate the proportion. We show that the proposed
scheme is effective over a wide range of traffic
scenarios through simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "control theory; estimation; non-responsive traffic;
traffic modeling",
}
@Article{Guo:2002:SFU,
author = "Liang Guo and Ibrahim Matta",
title = "Scheduling flows with unknown sizes: approximate
analysis",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "276--277",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511378",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Previous job scheduling studies indicate that
providing rapid response to interactive jobs, which
place frequent but small demands, can reduce the
overall system average response time [1], especially
when the job size distribution is skewed (see [2] and
references therein). Since the size distribution of
Internet flows is skewed, it is natural to design a network
system that favors short file transfers through service
differentiation. However, to maintain system
scalability, detailed per-flow state such as flow
length is generally not available inside the network.
As a result, we usually resort to a threshold-based
heuristic to identify and give preference to short
flows. Specifically, packets from a new flow are always
given the highest priority. However, the priority is
reduced once the flow has transferred a certain amount
of packets. In this paper, we use the MultiLevel (ML)
feedback queue [3] to characterize this discriminatory
system. However, the solution given in [3] is in the
form of an integral equation, and to date the equation
has been solved only for job size distributions that have
the form of mixed exponential functions. We adopt an
alternative approach, namely using a conservation law
by Kleinrock [1], to solve for the average response
time in such a system. To that end, we approximate the
average response time of jobs by a linear function in
the job size and solve for the stretch (service
slowdown) factors. We show by simulation that such
approximation works well for job (flow) size
distributions that possess the heavy-tailed property
[2], although it does not work so well for exponential
distributions. Due to the limited space available, in
Section 2 we briefly describe the queueing model and
summarize our approximation approach to solving for the
average response time of the M/G/1/ML queueing system.
We conclude our paper in Section 3.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alouf:2002:FVC,
author = "Sara Alouf and Fabrice Huet and Philippe Nain",
title = "Forwarders vs. centralized server: an evaluation of
two approaches for locating mobile agents",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "278--279",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511379",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Internet has allowed the creation of huge amounts
of data located on many sites. Performing complex
operations on some data requires that the data be
transferred first to the machine on which the
operations are to be executed, which may require a
non-negligible amount of bandwidth and may seriously
limit performance if it is the bottleneck. However,
instead of moving the data to the code, it is possible
to move the code to the data, and perform all the
operations locally. This simple idea has led to a new
paradigm called {\em code-mobility:\/} a mobile object
--- sometimes called an agent --- is given a list of
destinations and a series of operations to perform on
each one of them. The agent will visit all of the
destinations, perform the requested operations and
possibly pass the result on to another object. Any
mobility mechanism must first provide a way to migrate
code from one host to another. It must also ensure that
any communication following a migration will not be
impaired by it, namely that two objects should still be
able to communicate even if one of them has migrated.
Such a mechanism is referred to as a {\em location\/}
mechanism since it often relies on the knowledge of the
location of the objects to ensure communications. Two
location mechanisms are widely used: the first one uses
a centralized server whereas the second one relies on
special objects called {\em forwarders.\/} This paper
evaluates and compares the performance of an existing
implementation of these approaches in terms of cost of
communication in the presence of migration. Based on a
Markov chain analysis, we will construct and solve two
mathematical models, one for each mechanism and will
use them to evaluate the cost of location. For the
purpose of validation, we have developed for each
mechanism a benchmark that uses {\em ProActive\/} [2],
a Java library that provides all the necessary
primitives for code mobility. Experiments conducted on
a LAN and on a MAN have validated both models and have
shown that the location server always performs better
than the forwarders. Using our analytical models we
will nevertheless identify situations where the
opposite conclusion holds. However, under most
operational conditions location servers will perform
better than forwarders.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2002:TCR,
author = "Hyunseok Chang and Ramesh Govindan and Sugih Jamin and
Scott J. Shenker and Walter Willinger",
title = "Towards capturing representative {AS}-level {Internet}
topologies",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "280--281",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511380",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For the past two years,there has been a significant
increase in research activities related to studying and
modeling the Internet's topology, especially at the
level of {\em autonomous systems\/} (ASs). A closer
look at the measurements that form the basis for all
these studies reveals that the data sets used consist
of the BGP routing tables collected by the Oregon route
server (henceforth, the {\em Oregon route-views\/})
[1]. So far, there has been anecdotal evidence and an
intuitive understanding among researchers in the field
that BGP-derived AS connectivity is not complete.
However, as far as we know, there has been no
systematic study on {\em quantifying\/} the
completeness of currently known AS-level Internet
topologies. Our main objective in this paper is to
quantify the completeness of Internet AS maps
constructed from the Oregon route-views and to attempt
to capture {\em more representative\/} AS-level
Internet topology. One of the main contributions of
this paper is in developing a methodology that enables
quantitative investigations into issues related to the
(in)completeness of BGP-derived AS maps.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brownlee:2002:ISS,
author = "Nevil Brownlee and kc claffy",
title = "{Internet} stream size distributions",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "282--283",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511381",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present and discuss stream size and lifetime
distributions for web and non-web TCP traffic on a
campus OC12 link at UC San Diego. The distributions are
stable over long periods, and show that on this link
only 3\% of the streams last longer than one minute,
and that only about 0.5\% of them are bigger than 100
kBytes. Although there are large streams (elephants) on
this link, the bulk of its traffic is composed of many
small streams (mice).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2002:CLD,
author = "Yingwu Zhu and Yiming Hu",
title = "Can large disk built-in caches really improve system
performance?",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "284--285",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511382",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Via detailed file system and disk system simulation,
we examine the impact of disk built-in caches on the
system performance. Our results indicate that the
current trend of using large built-in caches is
unnecessary and a waste of money and power for most
users. Disk manufacturers could use much smaller
built-in caches to reduce the cost as well as
power-consumption, without affecting performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Simmonds:2002:WSB,
author = "Rob Simmonds and Carey Williamson and Russell Bradford
and Martin Arlitt and Brian Unger",
title = "{Web} server benchmarking using parallel {WAN}
emulation",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "286--287",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses the use of a parallel
discrete-event network emulator called the Internet
Protocol Traffic and Network Emulator (IP-TNE) for Web
server benchmarking. The experiments in this paper
demonstrate the feasibility of high-performance WAN
emulation using parallel discrete-event simulation
techniques on shared-memory multiprocessors. Our
experiments with the Apache Web server achieve 3400
HTTP transactions per second for simple Web workloads,
and 1000 HTTP transactions per second for realistic Web
workloads, for static document retrieval across
emulated WAN topologies of up to 4096 concurrent
Web/TCP clients. The results show that WAN
characteristics, including round-trip delays, link
speeds, packet losses, packet sizes, and bandwidth
asymmetry, all have significant impacts on Web server
performance. WAN emulation enables stress testing and
benchmarking of Web server performance in ways that may
not be possible in simple LAN test scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Almeida:2002:AWB,
author = "Virgilio Almeida and Martin Arlitt and Jerry Rolia",
title = "Analyzing a {Web}-based system's performance measures
at multiple time scales",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "3--9",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588162",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Web and e-commerce workloads are known to vary
significantly from hour to hour, day to day, and week
to week. The causes of these fluctuations are changes
in the number of users visiting a site and the mix of
services they require. Since the workloads are known to
vary over time, one should not simply choose an
arbitrary time interval and consider it as a reference
for performance evaluation. We conclude that time
scales are of great importance for operational
analysis, particularly for systems with bursty loads.
Service level agreements must certainly take into
account measurement time scales. Similarly, input
parameters for predictive models are sensitive to time
scale. Ultimately, a time scale should be chosen for
service level requirements that best expresses the
needs of end-users and the price the owner of a site is
willing to pay for QoS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Andreolini:2002:PSD,
author = "Mauro Andreolini and Michele Colajanni and Ruggero
Morselli",
title = "Performance study of dispatching algorithms in
multi-tier {Web} architectures",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "10--20",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588163",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The number and heterogeneity of requests to Web sites
are increasing, partly because Web technology is
becoming the preferred interface for information
systems. Many systems hosting current Web sites are
complex architectures composed of multiple server
layers with strong scalability and reliability issues.
In this paper we compare the performance of several
combinations of centralized and distributed dispatching
algorithms working at the first and second layer, and
using different levels of state information. We confirm
some known results about load sharing in distributed
systems and give new insights to the problem of
dispatching requests in multi-tier cluster-based Web
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2002:SND,
author = "Yan Chen and Khian Hao Lim and Randy H. Katz and Chris
Overton",
title = "On the stability of network distance estimation",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "21--30",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588164",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Overlay network distance monitoring and estimation
system can benefit many new applications and services,
such as peer-to-peer overlay routing and location.
However, there is a lack of such scalable system with
small overhead, good usability, and good distance
estimation accuracy and stability. Thus we propose a
scalable overlay distance monitoring system, {\em
Internet Iso-bar}, which clusters hosts based on the
similarity of their perceived network distance, with no
assumption about the underlying network topology. The
center of each cluster is then chosen as a monitor to
represent its cluster for probing and distance
estimation. We compare it with other network distance
estimation systems, such as Global Network Positioning
(GNP) [1]. Internet Iso-bar is easy to implement and
use, and has good scalability and small communication
and computation cost for online monitoring. Preliminary
evaluation on real Internet measurement data also shows
that Internet Iso-bar has high prediction accuracy and
stability. Finally, by adjusting the number of
clusters, we can smoothly trade off the measurement and
management cost for better distance estimation accuracy
and stability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:2002:DSP,
author = "Alexander Thomasian and Chang Liu",
title = "Disk scheduling policies with lookahead",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "31--40",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588165",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advances in magnetic recording technology have
resulted in a rapid increase in disk capacities, but
improvements in the mechanical characteristics of disks
have been quite modest. For example, the access time to
random disk blocks has decreased by a mere factor of
two, while disk capacities have increased by several
orders of magnitude. High performance OLTP applications
subject disks to a very demanding workload, since they
require high access rates to randomly distributed disk
blocks and gain limited benefit from caching and
prefetching. We address this problem by re-evaluating
the performance of some well-known disk scheduling
methods, before proposing and evaluating extensions to
them. A variation to CSCAN takes into account
rotational latency, so that the service time of further
requests is reduced. A variation to SATF considers the
sum of service times of several successive requests in
scheduling the next request, so that the arm is moved
to a (temporal) neighborhood with many requests. The
service time of further requests is discounted, since
their immediate processing is not guaranteed. A
variation to the SATF policy prioritizes reads with
respect to writes and processes winner write requests
conditionally, i.e., when the ratio of their service
time to that of the winner read request is smaller than
a certain threshold. We review previous work to put our
work into the proper perspective and discuss plans for
future work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk scheduling; LOOK; SATF; SCAN; scheduling policies
with lookahead; simulation",
}
@Article{Brandwajn:2002:NSB,
author = "Alexandre Brandwajn",
title = "A note on {SCSI} bus waits",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "41--47",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588166",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the SCSI-2 standard, the unique IDs of devices on
the bus define a fixed priority whenever several
devices compete for the use of the bus. Although the
more recent SCSI-3 standard specifies an additional
fair arbitration mode, it leaves such fair mode an
optional feature. Despite a number of allusions to
potential unfairness of the traditional SCSI bus
arbitration scattered in the trade literature, there
seem to be few formal studies to quantify this
unfairness. In this paper, we propose a simple model of
SCSI bus acquisition in which devices on the bus are
viewed as sources of requests with fixed non-preemptive
priorities. We use the model to assess the expected
extent of unfairness, as measured by the mean bus wait,
under varying load conditions. Effects of tagged
command queueing are not considered in this note.
Numerical results obtained with our model show that
there is little unfairness as long as the workload is
balanced across devices and the bus utilization is
relatively low. Interestingly, even for medium bus
utilization a significant fraction of bus requests find
the bus free, which might correlate with the service
rounds noted in a recent experimental study. For
unbalanced loads and higher bus utilization, the
expected wait for the bus experienced by the lowest
priority devices can become significantly larger than
the one experienced by the highest priority device. This
appears to be especially true if the higher priority
devices have higher I/O rates and occupy the bus for
longer periods. As might be expected, even for balanced
workloads, unfairness tends to increase with the number
of devices on the bus.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasce:2002:PSP,
author = "Daniel A. Menasc{\'e} and Lavanya Kanchanapalli",
title = "Probabilistic scalable {P2P} resource location
services",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "48--58",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588167",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalable resource discovery services form the core of
directory and other middleware services. Scalability
requirements preclude centralized solutions. The need
to have directory services that are highly robust and
that can scale with the number of resources and the
performance of individual nodes points to Peer-to-Peer
(P2P) architectures as a promising approach. The
resource location problem can be simply stated as
`given a resource name, find the location of a node or
nodes that manage the resource.' We call this the {\em
deterministic\/} location problem. In a very large
network, it is clearly not feasible to contact all
nodes to locate a resource. Therefore, we modify the
problem statement to `given a resource name, find with
a given probability, the location of a node or nodes
that manage the resource.' We call this a {\em
probabilistic\/} location approach. We present a
protocol that solves this problem and develop an
analytical model to compute the probability that a
directory entry is found, the fraction of peers
involved in a search, and the average number of hops
required to find a directory entry. Numerical results
clearly show that the proposed approach achieves high
probability of finding the entry while involving a
relatively small fraction of the total number of peers.
The analytical results are further validated by results
obtained from an implementation of the proposed
protocol in a cluster of workstations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2002:SIW,
author = "Mark S. Squillante",
title = "Special issue on the {Workshop on MAthematical
performance Modeling and Analysis (MAMA 2002)}",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "2--2",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605523",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2002:APP,
author = "Shengke Yu and Marianne Winslett and Jonghyun Lee and
Xiaosong Ma",
title = "Automatic and portable performance modeling for
parallel {I/O}: a machine-learning approach",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "3--5",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605524",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A performance model for a parallel I/O system is
essential for detailed performance analyses, automatic
performance optimization of I/O request handling, and
potential performance bottleneck identification. Yet
how to build a portable performance model for parallel
I/O systems is an open problem. In this paper, we
present a machine-learning approach to automatic
performance modeling for parallel I/O systems. Our
approach is based on the use of a platform-independent
performance metamodel, which is a radial basis function
neural network. Given training data, the metamodel
generates a performance model automatically and
efficiently for a parallel I/O system on a given
platform. Experiments suggest that our goal of having
the generated model provide accurate performance
predictions is attainable, for the parallel I/O library
that served as our experimental testbed on an IBM SP.
This suggests that it is possible to model parallel I/O
system performance automatically and portably, and
perhaps to model a broader class of storage systems as
well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riska:2002:EFL,
author = "Alma Riska and Vesselin Diev and Evgenia Smirni",
title = "Efficient fitting of long-tailed data sets into
phase-type distributions",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "6--8",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605525",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new technique for fitting long-tailed
data sets into phase-type (PH) distributions. This
technique fits data sets with non-monotone densities
into a mixture of Erlang and hyperexponential
distributions, and data sets with completely monotone
densities into hyperexponential distributions. The
method first partitions the data set in a divide and
conquer fashion and then uses the
Expectation-Maximization (EM) algorithm to fit the data
of each partition into a PH distribution. The fitting
results for each partition are combined to generate the
final fitting for the entire data set. The new method
is accurate, efficient, and allows one to apply
existing analytic tools to analyze the behavior of
queueing systems that operate under workloads that
exhibit long-tail behavior, such as queues in
Internet-related systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2002:USL,
author = "Mor Harchol-Balter and Karl Sigman and Adam Wierman",
title = "Understanding the slowdown of large jobs in an
{M/GI/1} system",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "9--11",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605526",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We explore the performance of an M/GI/1 queue under
various scheduling policies from the perspective of a
new metric: the it slowdown experienced by largest
jobs. We consider scheduling policies that bias against
large jobs, towards large jobs, and those that are
fair, e.g., Processor-Sharing. We prove that as job
size increases to infinity, all work-conserving
policies converge almost surely with respect to this
metric to no more than $ 1 / (1 - \rho) $, where $ \rho $
denotes load. We also find that the expected slowdown
under any work-conserving policy can be made
arbitrarily close to that under Processor-Sharing, for
all job sizes that are sufficiently large.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:2002:MPS,
author = "Leana Golubchik and John C. S. Lui",
title = "Multi-path streaming: is it worth the trouble?",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "12--14",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605527",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baryshnikov:2002:FSU,
author = "Y. Baryshnikov and E. Coffman and P. Jelenkovi{\'c}
and P. Mom{\v{c}}ilovi{\'c} and D. Rubenstein",
title = "Flood search under the {California} split strategy",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "15--16",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605528",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a version of the problem of searching
peer-to-peer networks by means of {\em floods}, or {\em
expanding rings\/}; when a network reduces to a path,
then the term flood becomes the more familiar search
term `scan,' which is the focus of this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marbukh:2002:RTE,
author = "Vladimir Marbukh",
title = "Robust traffic engineering: game theoretic
perspective",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "17--19",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605529",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "On-line routing algorithms deal with requests as they
arrive without assuming any knowledge of the underlying
process that generates the streams of requests. By
contrast, off-line traffic engineering algorithms
assume complete statistical knowledge of the request
generating process. This dichotomy, however,
oversimplifies many practical situations when some
incomplete information on the expected demands is
available, and proper utilization of the available
information may improve the network performance. This
paper proposes a game theoretic framework for robust
traffic engineering intended to guard against the worst
case scenario with respect to possible uncertainties in
the external demands and link loads. The proposed
framework can be interpreted as a game of the routing
algorithm attempting to optimize the network
performance and the adversarial environment attempting
to obstruct these efforts by selecting the worst case
scenario with respect to the uncertainties. Two
different classes of schemes are considered: first,
centralized schemes suitable for {\em MPLS\/}
implementation, and, second, decentralized schemes
suitable for {\em OSPF-OMP\/} implementation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "equal cost multi-path; game theoretic framework; MPLS;
OSPF-OMP; robustness; stability; traffic engineering;
uncertain demand",
}
@Article{Benaboud:2002:ASC,
author = "H. Benaboud and A. Berqia and N. Mikou",
title = "An analytical study of {CANIT} algorithm in {TCP}
protocol",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "20--22",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605530",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "{\em CANIT\/} (Congestion Avoidance with Normalized
Interval of Time) algorithm is a new policy for TCP
congestion avoidance which is proposed in order to
improve TCP fairness over long delay links. {\em
CANIT\/} uses a new parameter referred to NIT ({\em
Normalized Interval of Time\/}), which is the key of
this algorithm. In former works, we showed by
simulations of some configuration with various value of
NIT parameter, that using our algorithm instead of the
standard one, improves the TCP fairness as well as the
utilisation of network resources. In this work, we
propose an analytical study and we give the basic
equations in order to find the optimal value of NIT
parameter which provides more fairness and better
bandwidth utilisation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kamal:2002:MTR,
author = "Ahmed E. Kamal",
title = "Modeling {TCP Reno} with {RED}-based routers",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "23--25",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605531",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to introduce an accurate
performance model for the TCP Reno version in the
presence of a bottlenecked router which uses the Random
Early Detection (RED) active queue management
mechanism. The paper makes two
contributions: $ \bullet $ It introduces an accurate
model of a target source operating according to the TCP
Reno mechanism in the presence of background traffic.
The background traffic is represented by a general
discrete batch Markov arrival process (D-BMAP), which
is modified in order to make the phase transitions
dependent on packet losses. It can therefore be used to
model a collection of UDP and/or TCP sources. Under
this model, packets are dropped only when the router is
congested, or when the RED protocol is invoked, i.e.,
the buffer occupancy is taken into account. $ \bullet $
The paper also introduces an accurate model of the RED
mechanism, which tracks the evolution of the difference
between the instantaneous and average queue sizes. This
representation is chosen since the average queue size
tends to follow the instantaneous queue size, and
therefore the difference between them is usually
limited. This model is different from the models
presented in the literature for TCP in a number of
ways. Unlike [1, 2] where packet losses are random, and
independent of the actual buffer occupancy, our model
captures the buffer occupancy, and the actual packet
losses due to buffer overflow. This paper also models
the cross traffic using a general process. Reference
[3] considered the effect of cross traffic only by
modeling the service times using a random process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Samios:2002:MTT,
author = "Charalampos (Babis) Samios and Mary K. Vernon",
title = "Modeling throughput in {TCP Vegas}",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "26--28",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605532",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This abstract describes a simple and accurate analytic
model for the steady state throughput of TCP Vegas, as
a function of round trip time and packet loss rate.
Such models have previously been developed for TCP
Reno. However, several aspects of TCP Vegas need to be
treated quite differently from their counterparts in
Reno. In particular, TCP Vegas employs an algorithm to
detect the incipient stages of congestion in the
network and preemptively adjusts the sending rate to
avoid losses. The proposed model reflects this
behavior, as well as Vegas' new slow start mechanism,
and the most important of the innovative congestion
recovery mechanisms introduced in TCP Vegas. Initial
validations against the ns-2 simulator configured to
simulate TCP Vegas are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chandramouli:2002:MAU,
author = "Y. Chandramouli and Arnold L. Neidhardt",
title = "Mathematical analysis of the use of application-level
traffic measurements for capacity engineering",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "29--31",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605533",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In general, the traffic characteristics of the
individual applications that constitute the aggregate
traffic on a network can be important for capacity
engineering. In this paper, we demonstrate, based on
mathematical analysis, the value of
application-specific measurements, even when there is
no service differentiation. Specifically, under certain
assumptions, we obtain the result that engineering
errors occur when traffic characteristics of individual
applications are ignored, and that the errors are in
the direction of under-engineering. The assumptions are
that the individual applications can be modeled
adequately as Fractional Brownian Motions and that
measurements are available only at relatively coarse
time scales (as is common presently). The results in
this paper emphasize the value of collecting
fine-grained traffic measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xia:2002:TMP,
author = "Cathy H. Xia and Zhen Liu and Mark S. Squillante and
Li Zhang and Naceur Malouch",
title = "Traffic modeling and performance analysis of
commercial {Web} sites",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "32--34",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haas:2002:VLR,
author = "Peter J. Haas and Peter W. Glynn",
title = "On the validity of long-run estimation methods for
discrete-event systems",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "35--37",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605535",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2002:CSP,
author = "David Gamarnik",
title = "Computing stationary probability distributions and
large deviation rates for constrained random walks: the
undecidability results",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "38--40",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our model is a constrained homogeneous random walk in
$ Z + d $. The convergence to stationarity for such a
random walk can often be checked by constructing a
Lyapunov function. The same Lyapunov function can also
be used for computing approximately the stationary
distribution of this random walk, using methods
developed in [11]. In this paper we show that computing
exactly the stationary probability for this type of
random walks is an undecidable problem: no algorithm
can exist to achieve this task. We then prove that
computing large deviation rates for this model is also
an undecidable problem. We extend these results to a
certain type of queueing systems. The implication of
these results is that no useful formulas for computing
stationary probabilities and large deviation rates can
exist in these systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harrison:2002:PFC,
author = "Peter G. Harrison",
title = "Product-forms from a {CAT} and {DOG}",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "41--43",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The equilibrium state space probabilities of a
stationary Markov chain can be obtained immediately
from its reversed process. There are two main steps in
the derivation of product-form solutions for
multi-dimensional Markov chains using this approach.
First, the reversed process must be determined. This is
achieved for a wide class of cooperating processes
using a compound agent theorem (CAT), a compositional
result from Markovian Process Algebra (MPA). Secondly,
a path to each state must be found from some specified
reference state. This is usually obtained in a simple
way by considering the components of the state in order
of dimension, e.g. in a dimension-ordered graphical
(DOG) representation. In this note, the main results
for reversing a stationary compound Markov process,
under appropriate conditions, are given and applied to
deriving product-forms. No balance equations are
solved.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Allman:2003:EXR,
author = "Mark Allman",
title = "An evaluation of {XML-RPC}",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "2--11",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper explores the complexity and performance of
the XML-RPC system for remote method invocation. We
developed a program that can use either XML-RPC-based
network communication or a hand-rolled version of
networking code based on the java.net package. We first
compare our two implementations using traditional
object-oriented metrics. In addition, we conduct tests
over a local network and the Internet to assess the
performance of the two versions of the networking code
using traditional internetworking metrics. We find that
XML-RPC reduces the programming complexity of the
software by roughly 50\% (across various metrics). On
the other hand, the hand-rolled java.net-based
implementation offers up to an order of magnitude
better network performance in some of our tests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weissman:2003:GES,
author = "Jon Weissman",
title = "Guest editorial: special issue on grid computing",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "12--12",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773059",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Taylor:2003:PIP,
author = "Valerie Taylor and Xingfu Wu and Rick Stevens",
title = "{Prophesy}: an infrastructure for performance analysis
and modeling of parallel and {Grid} applications",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "13--18",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773060",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance is an important issue with any
application, especially grid applications. Efficient
execution of applications requires insight into how the
system features impact the performance of the
applications. This insight generally results from
significant experimental analysis and possibly the
development of performance models. This paper presents
the Prophesy system, for which the novel component is
the model development. In particular, this paper
discusses the use of our {\em coupling parameter\/}
(i.e., a metric that attempts to quantify the
interaction between kernels that compose an
application) to develop application models. We discuss
how this modeling technique can be used in the analysis
of grid applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "grid applications; grid systems; parallel
applications; performance analysis; performance
modeling",
}
@Article{Lowekamp:2003:CAP,
author = "Bruce B. Lowekamp",
title = "Combining active and passive network measurements to
build scalable monitoring systems on the {Grid}",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "19--26",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773061",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Because the network provides the wires that connect a
grid, understanding the performance provided by a
network is crucial to achieving satisfactory
performance from many grid applications. Monitoring the
network to predict its performance for applications is
an effective solution, but the costs and scalability
challenges of actively injecting measurement traffic,
as well as the information access and accuracy
challenges of using passively collected measurements,
complicate the problem of developing a monitoring
solution for a global grid. This paper is a preliminary
report on the Wren project, which is focused on
developing scalable solutions for network performance
monitoring. By combining active and passive monitoring
techniques, Wren is able to reduce the need for
invasive measurements of the network without
sacrificing measurement accuracy on either the WAN or
LAN levels. Specifically, we present topology-based
steering, which dramatically reduces the number of
measurements taken for a system by using passively
acquired topology and utilization to select the
bottleneck links that require active bandwidth probing.
Furthermore, by using passive measurements while an
application is running and active measurements when
none is running, we preserve our ability to offer
accurate, timely predictions of network performance,
while eliminating additional invasive measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Snavely:2003:BGC,
author = "Allan Snavely and Greg Chun and Henri Casanova and Rob
F. {Van der Wijngaart} and Michael A. Frumkin",
title = "Benchmarks for {Grid} computing: a review of ongoing
efforts and future directions",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "27--32",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773062",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Grid architectures are collections of computational
and data storage resources linked by communication
channels for shared use. It is important to deploy
measurement methods so that Grid applications and
architectures can evolve guided by scientific
principles. Engineering pursuits need agreed upon
metrics---a common language for communicating results,
so that alternative implementations can be compared
quantitatively. Users of systems need performance
parameters that describe system capabilities so that
they can develop and tune their applications.
Architects need examples of how users will exercise
their system to improve the design. The Grid community
is building systems such as the TeraGrid [1] and the
Information Power Grid [2], while applications that
can fully benefit from such systems are also being
developed. We conclude that the time to develop and
deploy sets of Grid benchmarks is now. This article
reviews fundamental principles, early efforts, and
benefits of Grid benchmarks to the study and design of
Grids.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmarks; grid computing",
}
@Article{Lu:2003:GGR,
author = "Dong Lu and Peter A. Dinda",
title = "{GridG}: generating realistic computational grids",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "33--40",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773063",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A realistic workload is essential in evaluating
middleware for computational grids. One important
component of that workload is the raw grid itself: an
annotated graph representing the network topology and
the hardware and software available on each node and
link within it. GridG is an extensible synthetic
generator of such graphs that is implemented as a
series of transformations on a common graph format. The
paper provides a definition of and requirements for
grid generation. We then describe the GridG process in
two steps: topology generation and annotation. For
topology generation, we have both a model and a
mechanism. We leverage Tiers, an existing tool commonly
used in the networking community, but we extend it to
produce graphs that conform to recently discovered
power laws of Internet topology. We also contribute to
the theory of network topology by pointing out a
contradiction between two laws, and proposing a new
version of one of them. For annotation, we have
developed a mechanism, the {\em requirements\/} for a
model, and identified the open problem of
characterizing the distribution and correlation of
hardware and software resources on the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wolski:2003:EPR,
author = "Rich Wolski",
title = "Experiences with predicting resource performance
on-line in computational grid settings",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "41--49",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773064",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe methods for predicting the
performance of Computational Grid resources (machines,
networks, storage systems, etc.) using computationally
inexpensive statistical techniques. The predictions
generated in this manner are intended to support
adaptive application scheduling in Grid settings, and
on-line fault detection. We describe a
mixture-of-experts approach to non-parametric,
univariate time-series forecasting, and detail the
effectiveness of the approach using example data
gathered from `production' (i.e. non-experimental)
Computational Grid installations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Girbal:2003:DSR,
author = "Sylvain Girbal and Gilles Mouchard and Albert Cohen
and Olivier Temam",
title = "{DiST}: a simple, reliable and scalable method to
significantly reduce processor architecture simulation
time",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "1--12",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781029",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While architecture simulation is often treated as a
methodology issue, it is at the core of most processor
architecture research works, and simulation speed is
often the bottleneck of the typical trial-and-error
research process. To speedup simulation during this
research process and get trends faster, researchers
usually reduce the trace size. More sophisticated
techniques like trace sampling or distributed
simulation are scarcely used because they are
considered unreliable and complex due to their impact
on accuracy and the associated warm-up issues. In this
article, we present DiST, a practical distributed
simulation scheme where, unlike in other simulation
techniques that trade accuracy for speed, the user is
relieved from most accuracy issues thanks to an
automatic and dynamic mechanism for adjusting the
warm-up interval size. Moreover, the mechanism is
designed so as to always privilege accuracy over
speedup. The speedup scales with the amount of
available computing resources, bringing an average 7.35
speedup on 10 machines with an average IPC error of
1.81\% and a maximum IPC error of 5.06\%. Besides
proposing a solution to the warm-up issues in
distributed simulation, we experimentally show that our
technique is significantly more accurate than trace
size reduction or trace sampling for identical
speedups. We also show not only that the error always
remains small for IPC and other metrics, but also that a
researcher can reliably base research decisions on DiST
simulation results. Finally, we explain how the DiST
tool is designed to be easily pluggable into existing
architecture simulators with very few modifications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed simulation; processor architecture",
}
@Article{Aamodt:2003:FMO,
author = "Tor M. Aamodt and Pedro Marcuello and Paul Chow and
Antonio Gonz{\'a}lez and Per Hammarlund and Hong Wang
and John P. Shen",
title = "A framework for modeling and optimization of prescient
instruction prefetch",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "13--24",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781030",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a framework for modeling
macroscopic program behavior and applies it to
optimizing prescient instruction prefetch --- a novel
technique that uses helper threads to improve
single-threaded application performance by performing
judicious and timely instruction prefetch. A helper
thread is initiated when the main thread encounters a
spawn point, and prefetches instructions starting at a
distant target point. The target identifies a code
region tending to incur I-cache misses that the main
thread is likely to execute soon, even though
intervening control flow may be unpredictable. The
optimization of spawn-target pair selections is
formulated by modeling program behavior as a Markov
chain based on profile statistics. Execution paths are
considered stochastic outcomes, and aspects of program
behavior are summarized via path expression mappings.
Mappings for computing reaching and a posteriori
probabilities; path length mean and variance; and
expected path footprint are presented. These are used
with Tarjan's fast path algorithm to efficiently
estimate the benefit of spawn-target pair selections.
Using this framework we propose a spawn-target pair
selection algorithm for prescient instruction prefetch.
This algorithm has been implemented, and evaluated for
the Itanium Processor Family architecture. A limit
study finds 4.8\% to 17\% speedups on an in-order
simultaneous multithreading processor with eight
contexts, over nextline and streaming I-prefetch for a
set of benchmarks with high I-cache miss rates. The
framework in this paper is potentially applicable to
other thread speculation techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical modeling; helper threads; instruction
prefetch; multithreading; optimization; path
expressions",
}
@Article{Xia:2003:QSL,
author = "Cathy H. Xia and Zhen Liu",
title = "Queueing systems with long-range dependent input
process and subexponential service times",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "25--36",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781032",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the asymptotic tail distribution of
stationary waiting times and stationary virtual waiting
times in a single-server queue with long-range
dependent arrival process and subexponential service
times. We investigate the joint impact of the long
range dependency of the arrival process and of the tail
distribution of the service times. We consider two
traffic models that have been widely used to
characterize the long-range dependence structure,
namely, the M/G/$\infty$ input model and the Fractional
Gaussian Noise (FGN) model. We focus on the response
times of the customers in a First-Come First-Serve
(FCFS) queueing system, although the results carry
through to the backlog distribution of the system with
any arbitrary queueing discipline. When the arrival
process is driven by an M/G/$\infty$ input model we show that
if the residual service time tail distribution $ F_e $
is lighter than the residual session duration $ G_e $,
then the stationary waiting time is dominated by the
long-range dependence structure, which is determined by
the residual session duration $ G_e $. If the residual
service time distribution $ F_e $ is heavier than the
residual session duration $ G_e $, then the tail
distribution of the stationary waiting time is
dominated by that of the residual service time. When
the arrival process is modeled by an FGN, we show that
the waiting time tail distribution is asymptotically
equal to the tail distribution of the residual service
time if the latter is asymptotically heavier than a
Weibull distribution with shape parameter $ 2 - 2 H $,
where $H$ is the Hurst parameter of the FGN. If,
however, this residual service time is asymptotically
lighter than a Weibull distribution with shape parameter
$ 2 - 2 H$, then the waiting time tail distribution is
dominated by the dependence structure of the arrival
process so that it is asymptotically equal to a Weibull
distribution with shape parameter $ 2 - 2 H$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic queueing analysis; FGN; long-range
dependency; M/G/$\infty$; subexponential distributions",
}
@Article{Galmes:2003:ACM,
author = "Sebasti{\`a} Galm{\'e}s and Ramon Puigjaner",
title = "An algorithm for computing the mean response time of a
single server queue with generalized on\slash off
traffic arrivals",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "37--46",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, an exact solution for the response time
distribution of a single server, infinite capacity,
discrete-time queue is presented. This queue is fed by
a flexible discrete-time arrival process, which follows
an on/off evolution. A workload variable is associated
with each arrival instant, which may correspond to the
service demand generated by a single arrival, or
represent the number of simultaneous arrivals (bulk
arrivals). Accordingly, the analysis focuses on two
types of queues: (On/Off)/G/1 and (Batch-On/Off)/D/1.
For both cases, a decomposition approach is carried
out, which divides the problem into two contributions:
the response time experienced by single bursts in
isolation, and the increase in the response time caused
by the unfinished work that propagates from burst to
burst. Particularly, the solution for the unfinished
work is derived from a Wiener--Hopf factorization of
random walks, which was already used in the analysis of
discrete GI/G/1 queues. Compared to other related
works, the procedure proposed in this paper is exact,
valid for any traffic intensity and has no constraints
on the distributions of the input random variables
characterizing the process: duration of on and off
periods, and workload. From the general solution, an
efficient and robust iterative algorithm for computing
the expected response time of both queues is developed,
which can provide results at any desired precision.
This algorithm is numerically evaluated for different
types of input distributions and proved against
simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "arrival process; Markov chain; queuing model; response
time; steady-state",
}
@Article{Garetto:2003:MSM,
author = "Michele Garetto and Don Towsley",
title = "Modeling, simulation and measurements of queuing delay
under long-tail {Internet} traffic",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "47--57",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe an analytical approach for
estimating the queuing delay distribution on an
Internet link carrying realistic TCP traffic, such as
that produced by a large number of finite-size
connections transferring files whose sizes are taken
from a long-tail distribution. The analytical
predictions are validated against detailed simulation
experiments and real network measurements. Despite its
simplicity, our model proves to be accurate and robust
under a variety of operating conditions, and offers
novel insights into the impact on the network of
long-tail flow length distributions. Our contribution
is a performance evaluation methodology that could be
usefully employed in network dimensioning and
engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Markovian models; queueing analysis; TCP",
}
@Article{Bohacek:2003:HSM,
author = "Stephan Bohacek and Jo{\~a}o P. Hespanha and Junsoo
Lee and Katia Obraczka",
title = "A hybrid systems modeling framework for fast and
accurate simulation of data communication networks",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "58--69",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781036",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a general hybrid systems
modeling framework to describe the flow of traffic in
communication networks. To characterize network
behavior, these models use averaging to continuously
approximate discrete variables such as congestion
window and queue size. Because averaging occurs over
short time intervals, one still models discrete events
such as the occurrence of a drop and the consequent
reaction (e.g., congestion control). The proposed
hybrid systems modeling framework fills the gap between
packet-level and fluid-based models: by averaging
discrete variables over a very short time scale (on the
order of a round-trip time), our models are able to
capture the dynamics of transient phenomena fairly
accurately. This provides significant flexibility in
modeling various congestion control mechanisms,
different queuing policies, multicast transmission,
etc. We validate our hybrid modeling methodology by
comparing simulations of the hybrid models against
packet-level simulations. We find that the probability
density functions produced by ns-2 and our hybrid model
match very closely with an $ L^1$-distance of less than
1\%. We also present complexity analysis of ns-2 and
the hybrid model. These tests indicate that hybrid
models are considerably faster.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; data communication networks;
hybrid systems; simulation; TCP; UDP",
}
@Article{Samios:2003:MTT,
author = "Charalampos (Babis) Samios and Mary K. Vernon",
title = "Modeling the throughput of {TCP Vegas}",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "71--81",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781037",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Previous analytic models of TCP Vegas throughput have
been developed for loss-free (all-Vegas) networks. This
work develops a simple and accurate analytic model for
the throughput of a TCP Vegas bulk transfer in the
presence of packet loss, as a function of average round
trip time, minimum round trip time, and loss rate for
the transfer. Similar models have previously been
developed for TCP Reno. However, several aspects of TCP
Vegas need to be treated differently than their
counterparts in Reno. The proposed model captures the
key innovative mechanisms that Vegas employs during
slow start, congestion avoidance, and congestion
recovery. The results include (1) a simple, validated
model of TCP Vegas throughput that can be used for
equation-based rate control of other flows such as UDP
streams, (2) a simple formula to determine, from the
measured packet loss rate, whether the network buffers
are overcommitted and thus the TCP Vegas flow cannot
reach the specified target lower threshold on
throughput, (3) new insights into the design and
performance of TCP Vegas, and (4) comparisons between
TCP Vegas and TCP Reno including new insights regarding
incremental deployment of TCP Vegas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "performance model; TCP; TCP Vegas; throughput",
}
@Article{Wang:2003:MAU,
author = "Jiantao Wang and Ao Tang and Steven H. Low",
title = "Maximum and asymptotic {UDP} throughput under
{CHOKe}",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "82--90",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/885651.781038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A recently proposed active queue management, CHOKe,
aims to protect TCP from UDP flows. Simulations have
shown that as UDP rate increases, its bandwidth share
initially rises but eventually drops. We derive an
approximate model of CHOKe and show that, provided the
number of TCP flows is large, the UDP bandwidth share
peaks at {\em (e+1)$^{-1}$ = 0.269\/} when the UDP
input rate is slightly larger than the link capacity,
and drops to zero as UDP input rate tends to infinity,
regardless of the TCP algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "AQM; bandwidth share; CHOKe; TCP; UDP",
}
@Article{Liu:2003:FMS,
author = "Yong Liu and Francesco {Lo Presti} and Vishal Misra
and Don Towsley and Yu Gu",
title = "Fluid models and solutions for large-scale {IP}
networks",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "91--101",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/885651.781039",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a scalable model of a network
of Active Queue Management (AQM) routers serving a
large population of TCP flows. We present efficient
solution techniques that allow one to obtain the
transient behavior of the average queue lengths, packet
loss probabilities, and average end-to-end latencies.
We model different versions of TCP as well as different
versions of RED, the most popular AQM scheme currently
in use. Comparisons between our models and ns
simulation show our models to be quite accurate while
at the same time requiring substantially less time to
solve, especially when workloads and bandwidths are
high.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluid model; large-scale IP networks; simulation",
}
@Article{Harrison:2003:GNP,
author = "P. G. Harrison",
title = "{G}-networks with propagating resets via {RCAT}",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "3--5",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959144",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stationary Markovian networks, defined by a collection
of cooperating agents, can be solved for their
equilibrium state probability distribution by a new
compositional method that computes their reversed
Markov process, under appropriate conditions. We apply
this approach to G-networks with chains of triggers and
generalised resets, which have some quite distinct
properties from the resets proposed recently.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wierman:2003:MTV,
author = "Adam Wierman and Takayuki Osogami and J{\"o}rgen
Ols{\'e}n",
title = "Modeling {TCP-Vegas} under on\slash off traffic",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "6--8",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2003:WIS,
author = "David Gamarnik and John Hasenbein",
title = "Weak instability in stochastic and fluid queueing
networks",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "9--10",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The fluid model has proven to be one of the most
effective tools for the analysis of stochastic queueing
networks, specifically for the analysis of stability.
It is known that stability of a fluid model implies
positive (Harris) recurrence (stability) of a
corresponding stochastic queueing network, and weak
stability implies rate stability of a corresponding
stochastic network. These results have been established
both for cases of specific scheduling policies and for
the class of all work conserving policies. However,
only partial converse results have been established and
in certain cases converse statements do not hold. In
this paper we close one of the existing gaps. For the
case of networks with two stations we prove that if the
fluid model is not weakly stable under the class of all
work conserving policies, then any corresponding
queueing network is not rate stable under the class of
all work conserving policies. We establish the result
by building a particular work conserving scheduling
policy which makes any corresponding stochastic process
transient. An important corollary of our result is that
the condition of the form {\rho}*",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluid limits; Harris recurrence; large deviations",
}
@Article{Duarte:2003:AFA,
author = "Fl{\'a}vio P. Duarte and Edmundo {de Souza e Silva}
and Don Towsley",
title = "An adaptive {FEC} algorithm using hidden {Markov}
chains",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "11--13",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959150",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Andrew:2003:AOG,
author = "Lachlan L. H. Andrew and Yuliy Baryshnikov and E. G.
Coffman and Stephen V. Hanly and Jolyon White",
title = "An asymptotically optimal greedy algorithm for large
optical burst switching systems",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "14--16",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959152",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As the number of wavelengths in OBS systems increases,
the utilization achievable for a given blocking
probability can be made to approach 100\%. This paper
shows that this property applies to a wavelength
allocation algorithm of greedy type. Another property
of this rule, one shared by most other wavelength
assignment algorithms, is that, since lost traffic
tends to occur near destinations, where the resource
usage wasted by such traffic is large, very low
blocking probabilities are important for efficient
operation. To help identify regions of low blocking
probability, we derive an asymptotically exact
condition for zero blocking probabilities; it has a
form reminiscent of the stability condition of the
M/G/1 queue.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluid limits; hydrodynamic limits; optical burst
switching; optical networks; stochastic modeling;
wavelength division multiplexing",
}
@Article{Marbukh:2003:TMF,
author = "Vladimir Marbukh",
title = "Towards mean field theory of wireless networks",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "17--19",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959154",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes to leverage a large body of
results on performance evaluation and optimization of
wire-line networks for obtaining relevant results for
wireless networks by using mean field approximation
based on the `effective' link capacities. We derive
mean field equations for the effective link capacities
and demonstrate how these capacities can be used for
evaluating the throughput regions as a function of the
channel model as well as transmission and routing
protocols. We also discuss the possibility of using mean
field approximation for assessing the quality of
service as a function of the external demands within
the throughput region.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mean field approximation; performance; wireless
network",
}
@Article{Lam:2003:PQS,
author = "Sum Lam and Rocky K. C. Chang",
title = "Per-queue stability analysis of a dynamic quota
sharing scheme for wireless networks",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "20--22",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959156",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we consider a dynamic quota sharing
scheme to support different classes of data traffic in
wireless networks. The novelty of this scheme enables
the lower-priority classes of traffic to use what has
not been used by the higher-priority classes. We have
performed per-queue stability analysis for this scheme.
Based on the stability results, threshold values can be
appropriately determined to fulfill certain throughput
requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ma:2003:IPN,
author = "Richard T. B. Ma and C. M. Lee and John C. S. Lui and
David K. Y. Yau",
title = "Incentive {P2P} networks: a protocol to encourage
information sharing and contribution",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "23--25",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959157",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2003:PDR,
author = "Eitan Bachmat",
title = "On the performance of {D}-redundant storage systems",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "26--27",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959159",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "AD-redundant storage system is a system containing D
identical disks which hold data whose total capacity is
that of a single disk.\par
A simple example of a D-redundant storage system is the
D-shadowed disk system in which there are D copies of
each data element. These copies are placed at identical
locations on the different disks.\par
The existence of multiple copies can be exploited to
improve read request access time. In a shadowed system,
for example, a read request may be serviced by the disk
whose head position is closest to the copy of the
requested data. In this note we will assume for
simplicity that all requests are read requests. The
analysis of write requests has a different character
since writes may in general be serviced
asynchronously.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riska:2003:ABM,
author = "Alma Riska and Evgenia Smirni and Gianfranco Ciardo",
title = "An aggregation-based method for the exact analysis of
a class of {GI/G/1}-type processes",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "28--30",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959161",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present an aggregation-based algorithm for the
exact analysis of Markov chains with GI/G/1-type
pattern in their repetitive structure, i.e., chains
that exhibit {\em both\/} M/G/1-type and GI/M/1-type
patterns and cannot be solved with existing techniques.
Markov chains with a GI/G/1 pattern result when
modeling open systems with faults/repairs that accept
jobs from multiple exogenous sources. Our method
provides exact computation of the steady state
probabilities, and allows computation of performance
measures of interest including the system queue length
or any of its higher moments, the exact probability of
system failures and repairs, and consequently a host of
performability measures. Our algorithm also applies to
systems that are purely of the M/G/1-type or the
GI/M/1-type, or their intersection, i.e.,
quasi-birth-death processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "GI/G/1-type processes; GI/M/1-type processes;
M/G/1-type processes; Markov chains; matrix-analytic
techniques; reliability analysis; stochastic
complementation",
}
@Article{Lin:2003:HDQ,
author = "Wuqin Lin and Zhen Liu and Harry Stavropoulos and
Cathy H. Xia",
title = "Hard deadline queueing system with application to
unified messaging service",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "31--33",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959163",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a queueing system with jobs having hard
deadlines. This is motivated by recent emerging unified
messaging service applications. The service time of a
job is assumed to be known upon arrival. A job will be
lost if it is not served by its deadline. For the
single-server system, we propose an on-line ED-Push
algorithm that is easy to implement and can achieve
near-optimal performance in terms of minimizing the
loss probability. Performance analyses for the
underlying M/M/1+D and G/D/1+D systems are then
provided. We also give an approximation of the loss
probability for the system with multiple servers under
a least-workload routing scheme. The numerical results
show that the ED-Push algorithm performs well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bansal:2003:AST,
author = "Nikhil Bansal",
title = "On the average sojourn time under {M/M/1/SRPT}",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "34--35",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959164",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an M/M/1 queueing system under the
Shortest Remaining Processing Time (SRPT) policy. We
show that there are constants $ c_1 $ and $ c_2 $ such
that the average sojourn time under SRPT lies between
$ c_1 (\mu (1 - \rho) \log 1 / (1 - \rho))^{-1} $ and
$ c_2 (\mu (1 - \rho) \log 1 / (1 - \rho))^{-1} $, where $
\mu $ denotes the service rate and $ \rho $ denotes the
load. Comparing this with the classic result that any
scheduling policy that does not use the knowledge of
job sizes has average sojourn time $ (\mu (1 -
\rho))^{-1} $ implies that SRPT offers a non-constant
improvement over such policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feng:2003:MSD,
author = "Hanhua Feng and Vishal Misra",
title = "Mixed scheduling disciplines for network flows",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "36--39",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959165",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a novel method to prove that the FBPS
discipline has optimal mean sojourn time and mean
slowdown ratio for DHR service time distributions in an
M/G/1 queue. We then discuss the problems related to
FBPS, and propose a new scheduling discipline to
overcome these problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghosh:2003:RCS,
author = "Soumyadip Ghosh and Mark S. Squillante",
title = "Revisiting correlations and scheduling in {Web}
servers",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "40--42",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959166",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Titchkosky:2003:PCD,
author = "Lance Titchkosky and Martin Arlitt and Carey
Williamson",
title = "A performance comparison of dynamic {Web}
technologies",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "2--11",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974037",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today, many Web sites dynamically generate responses
`on the fly' when user requests are received. In this
paper, we experimentally evaluate the impact of three
different dynamic content technologies (Perl, PHP, and
Java) on Web server performance. We quantify achievable
performance first for static content serving, and then
for dynamic content generation, considering cases both
with and without database access. The results show that
the overheads of dynamic content generation reduce the
peak request rate supported by a Web server by up to a
factor of 8, depending on the workload characteristics
and the technologies used. In general, our results show
that Java server technologies typically outperform both
Perl and PHP for dynamic content generation, though
performance under overload conditions can be erratic
for some implementations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Dynamic Content Generation; Performance Evaluation;
Web Performance; Web Server Benchmarking",
}
@Article{Allman:2003:ELR,
author = "Mark Allman and Wesley M. Eddy and Shawn Ostermann",
title = "Estimating loss rates with {TCP}",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "12--24",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Estimating loss rates along a network path is a
problem that has received much attention within the
research community. However, deriving accurate
estimates of the loss rate from TCP transfers has been
largely unaddressed. In this paper, we first show that
using a simple count of the number of retransmissions
yields inaccurate estimates of the loss rate in many
cases. The mis-estimation stems from flaws in TCP's
retransmission schemes that cause the protocol to
spuriously retransmit data in a number of cases. Next,
we develop techniques for refining the retransmission
count to produce a better loss rate estimate for both
Reno and SACK variants of TCP. Finally, we explore two
SACK-based variants of TCP with an eye towards reducing
spurious retransmits, the root cause of the
mis-estimation of the loss rate. An additional benefit
of reducing the number of needless retransmits is a
reduction in the amount of shared network resources
used to accomplish no useful work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Douceur:2003:RHA,
author = "John R. Douceur",
title = "Is remote host availability governed by a universal
law?",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "25--29",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974039",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The availability of peer-to-peer and other distributed
systems depends not only on the system architecture but
also on the availability characteristics of the hosts
participating in the system. This paper constructs a
model of remote host availability, derived from
measurement studies of four host populations. It argues
that hosts are incompletely partitioned into two
behavioral classes, one in which they are cycled on/off
periodically and one in which they are nominally kept
on constantly. Within a class, logarithmic availability
generally follows a uniform distribution; however, the
underlying reason for this is not readily apparent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brebner:2003:JIS,
author = "Paul Brebner and Jeffrey Gosper",
title = "{J2EE} infrastructure scalability and throughput
estimation",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "30--36",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974040",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "ECperf, the widely recognized industry standard J2EE
benchmark, has attracted a large number of results
submissions and their subsequent publication. However,
ECperf places little restriction on the hardware
platform, operating systems and databases utilized in
the benchmarking process. This, combined with the
existence of only two primary metrics, makes it
difficult to accurately compare the results, or the
performance of the Application Server products
themselves. By mining the full-disclosure archives for
trends and correlations we have discovered that J2EE
technology is very scalable with increasing middle-tier
resources, as long as the database has sufficient
resources to prevent it from becoming a bottleneck. Other
observed trends include a linear correlation between
middle-tier total processing power and throughput, as
well as between J2EE Application Server license costs
and throughput. However, the results clearly indicate
that there is an increasing cost per user with
increasing system capacity. Finally, the correlation
between middle-tier processing power and throughput,
combined with results obtained from a different
`lighter-weight' benchmark, facilitates an estimate of
throughput for different types of J2EE applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ECperf benchmark; Enterprise Java Beans (EJB); J2EE;
scalability; throughput",
}
@Article{Cui:2003:NHA,
author = "Jike Cui and Mansur. H. Samadzadeh",
title = "A new hybrid approach to exploit localities: {LRFU}
with adaptive prefetching",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "37--43",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974041",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reviewed a number of existing methods to
exploit the spatial and temporal locality commonly
existing in programs, and provided detailed analysis
and testing of adaptive prefetching (a method designed
to utilize spatial locality) and the least recently and
frequently used (LRFU) method (a method designed to
utilize temporal locality). The two methods were
combined in this work in terms of their exploitation of
locality. The comparative studies of the methods were
done using real traces, and hit rate was used as an
evaluation measure. Results showed that by using
adaptive prefetching, the hit rate improved
significantly by an average of 11.7\% over the hit rate
of LRU in the traces and cache configurations used. It
also showed that LRFU consistently gives higher hit
rates than LRU, but not by much in the trace files and
cache configurations tested. And the X value (a
controllable parameter which determines the weights
given to recency and frequency) has to be in a certain
range, which is usually narrow, in order to get the
best performance for hit rate. Compared to adaptive
prefetching and LRU, the hybrid approach of combining
adaptive prefetching and LRFU gave a consistently
higher hit rate also. But, affected by the performance
of LRFU, the improvement in the hit rate by the
combination was low.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burger:2004:TCA,
author = "Doug Burger and Anand Sivasubramaniam",
title = "Tools for computer architecture research",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "2--3",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054908",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Simulators are critical tools for computer
architecture research and education. They are
invaluable when evaluating hardware designs and
enhancements that would otherwise be prohibitively
expensive to prototype in practice. Simulators can be
useful vehicles for verifying the validity of initial
designs, understanding their cost-benefit trade-offs,
whether or not a more expensive and time-consuming
hardware prototyping effort is undertaken. In addition
to being the sole vehicle for conducting an
investigation in different research organizations,
simulators are extensively used in industry for
validating new ideas before justifying further
investments on those ideas. Further, simulators can
also serve as excellent platforms for teaching
difficult concepts in hardware and compilers, by
allowing students hands-on access to hardware and
software internals that may not be accessible
otherwise.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burger:2004:RES,
author = "Doug Burger and Todd M. Austin and Stephen W.
Keckler",
title = "Recent extensions to the {SimpleScalar Tool} suite",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "4--7",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054909",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Over the past eight years, the SimpleScalar Tool suite
has become the most widely used set of simulation tools
in the computer architecture research community. The
authors have recently completed an NSF-funded project
to extend and improve the SimpleScalar tools. In this
paper, we describe the extensions and improvements to
the tools, which include the capability to simulate
more instruction sets, graphical support for
performance viewing, and more simulators that model
different types of machines, including embedded
systems, ISA-specific systems, systems with operating
systems, and multiprocessing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bohrer:2004:MFS,
author = "Patrick Bohrer and James Peterson and Mootaz Elnozahy
and Ram Rajamony and Ahmed Gheith and Ron Rockhold and
Charles Lefurgy and Hazim Shafi and Tarun Nakra and
Rick Simpson and Evan Speight and Kartik Sudeep and
Eric {Van Hensbergen} and Lixin Zhang",
title = "{Mambo}: a full system simulator for the {PowerPC}
architecture",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "8--12",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mambo is a full-system simulator for modeling
PowerPC-based systems. It provides building blocks for
creating simulators that range from purely functional
to timing-accurate. Functional versions support fast
emulation of individual PowerPC instructions and the
devices necessary for executing operating systems.
Timing-accurate versions add the ability to account for
device timing delays, and support the modeling of the
PowerPC processor microarchitecture. We describe our
experience in implementing the simulator and its uses
within IBM to model future systems, support early
software development, and design new system software.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brooks:2004:PPS,
author = "David Brooks and Pradip Bose and Margaret Martonosi",
title = "Power-performance simulation: design and validation
strategies",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "13--18",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Microprocessor research and development increasingly
relies on detailed simulations to make design choices.
As such, the structure, speed, and accuracy of
microarchitectural simulators are of critical importance
to the field. This paper describes our experiences in
building two simulators, using related but distinct
approaches. One of the most important attributes of a
simulator is its ability to accurately convey design
trends as different aspects of the microarchitecture
are varied. In this work, we break down accuracy---a
broad term---into two sub-types: {\em relative\/} and
{\em absolute\/} accuracy. We then discuss typical
abstraction errors in power-performance simulators and
show when they do (or do not) affect the design rule
choices a user of those simulators might make. By
performing this validation study using the Wattch and
PowerTimer simulators, the work addresses validation
issues both broadly and in the specific case of a
fairly widely-used simulator.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vachharajani:2004:LSE,
author = "Manish Vachharajani and Neil Vachharajani and David A.
Penry and Jason A. Blome and David I. August",
title = "The {Liberty Simulation Environment}, version 1.0",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "19--24",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "High-level hardware modeling via simulation is an
essential step in hardware systems design and research.
Despite the importance of simulation, current model
creation methods are error prone and are unnecessarily
time consuming. To address these problems, we have
publicly released the Liberty Simulation Environment
(LSE), Version 1.0, consisting of a simulator builder
and automatic visualizer based on a shared hardware
description language. LSE's design was motivated by a
careful analysis of the strengths and weaknesses of
existing systems. This has resulted in a system in
which models are easier to understand, are faster to
develop, and have performance on par with other
systems. LSE is capable of modeling {\em any\/}
synchronous hardware system. To date, LSE has been used
to simulate and convey ideas about a diverse set of
complex systems including a chip multiprocessor
out-of-order IA-64 machine and a multiprocessor system
with detailed device models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hamerly:2004:HUS,
author = "Greg Hamerly and Erez Perelman and Brad Calder",
title = "How to use {SimPoint} to pick simulation points",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "25--30",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Understanding the cycle level behavior of a processor
running an application is crucial to modern computer
architecture research. To gain this understanding,
detailed cycle level simulators are typically employed.
Unfortunately, this level of detail comes at the cost
of speed, and simulating the full execution of an
industry standard benchmark on even the fastest
simulator can take weeks to months to complete. This
fact has not gone unnoticed, and several techniques
have been developed aimed at reducing simulation
time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hardavellas:2004:SFA,
author = "Nikolaos Hardavellas and Stephen Somogyi and Thomas F.
Wenisch and Roland E. Wunderlich and Shelley Chen and
Jangwoo Kim and Babak Falsafi and James C. Hoe and
Andreas G. Nowatzyk",
title = "{SimFlex}: a fast, accurate, flexible full-system
simulation framework for performance evaluation of
server architecture",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "31--34",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The new focus on commercial workloads in simulation
studies of server systems has caused a drastic increase
in the complexity and decrease in the speed of
simulation tools. The complexity of a large-scale
full-system model makes development of a monolithic
simulation tool a prohibitively difficult task.
Furthermore, detailed full-system models simulate so
slowly that experimental results must be based on
simulations of only fractions of a second of execution
of the modelled system. This paper presents SIMFLEX, a
simulation framework which uses component-based design
and rigorous statistical sampling to enable development
of complex models and ensure representative measurement
results with fast simulation turnaround. The novelty of
SIMFLEX lies in its combination of a unique,
compile-time approach to component interconnection and
a methodology for obtaining accurate results from
sampled simulations on a platform capable of evaluating
unmodified commercial workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitra:2004:STE,
author = "Debasis Mitra",
title = "Stochastic traffic engineering for demand uncertainty
and risk-aware network revenue management",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "1--1",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005687",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic traffic engineering for demand uncertainty
and risk-aware network revenue management We present a
stochastic traffic engineering framework for optimizing
bandwidth provisioning and route selection in networks.
Traffic demands are uncertain and specified by
probability distributions, and the objective is to
maximize a risk-adjusted measure of network revenue
that is generated by serving demands. Considerable
attention is given to the appropriate measure of risk
in the network model. We also advance risk-mitigation
strategies. The optimization model, which is based on
mean-risk analysis, enables a service provider to
maximize a combined measure of mean revenue and revenue
risk. The framework is intended for off-line traffic
engineering, which takes a centralized view of network
topology, link capacity and demand. We obtain
conditions under which the optimization problem is an
instance of convex programming. We study the properties
of the solution and show that it asymptotically meets
the stochastic efficiency criterion. In our numerical
investigations we illustrate the impact of demand
uncertainty on various aspects of the optimally traffic
engineered solutions. The service provider's tolerance
to risk is shown to have a strong influence on the
traffic engineering and revenue management decisions.
We develop the efficient frontier, which is the set of
Pareto optimal pairs of mean revenue and revenue risk,
to aid the service provider in selecting its operating
point.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marin:2004:CAP,
author = "Gabriel Marin and John Mellor-Crummey",
title = "Cross-architecture performance predictions for
scientific applications using parameterized models",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "2--13",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005691",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a toolkit for semi-automatically
measuring and modeling static and dynamic
characteristics of applications in an
architecture-neutral fashion. For predictable
applications, models of dynamic characteristics have a
convex and differentiable profile. Our toolkit operates
on application binaries and succeeds in modeling key
application characteristics that determine program
performance. We use these characterizations to explore
the interactions between an application and a target
architecture. We apply our toolkit to SPARC binaries to
develop architecture-neutral models of computation and
memory access patterns of the ASCI Sweep3D and the NAS
SP, BT and LU benchmarks. From our models, we predict
the L1, L2 and TLB cache miss counts as well as the
overall execution time of these applications on an
Origin 2000 system. We evaluate our predictions by
comparing them against measurements collected using
hardware performance counters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "modeling; performance analysis; prediction",
}
@Article{Huang:2004:MDS,
author = "Lan Huang and Gang Peng and Tzi-cker Chiueh",
title = "Multi-dimensional storage virtualization",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "14--24",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most state-of-the-art commercial storage
virtualization systems focus only on one particular
storage attribute, capacity. This paper describes the
design, implementation and evaluation of a {\em
multi-dimensional storage virtualization\/} system
called Stonehenge, which is able to virtualize a
cluster-based physical storage system along multiple
dimensions, including bandwidth, capacity, and latency.
As a result, Stonehenge is able to multiplex multiple
virtual disks, each with a distinct bandwidth,
capacity, and latency attribute, on a single physical
storage system as if they are separate physical disks.
A key enabling technology for Stonehenge is an
efficiency-aware real-time disk scheduling algorithm
called dual-queue disk scheduling, which maximizes disk
utilization efficiency while providing Quality of
Service (QoS) guarantees. To optimize disk utilization
efficiency, Stonehenge exploits run-time measurements
extensively, for admission control, computing
latency-derived bandwidth requirement, and predicting
disk service time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "quality of service; storage virtualization",
}
@Article{Blackburn:2004:MRP,
author = "Stephen M. Blackburn and Perry Cheng and Kathryn S.
McKinley",
title = "Myths and realities: the performance impact of garbage
collection",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "25--36",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper explores and quantifies garbage collection
behavior for three whole heap collectors and
generational counterparts: {\em copying semi-space,
mark-sweep,\/} and {\em reference counting}, the
canonical algorithms from which essentially all other
collection algorithms are derived. Efficient
implementations in MMTk, a Java memory management
toolkit, in IBM's Jikes RVM share all common mechanisms
to provide a clean experimental platform.
Instrumentation separates collector and program
behavior, and performance counters measure timing and
memory behavior on three architectures. Our
experimental design reveals key algorithmic features
and how they match program characteristics to explain
the direct and indirect costs of garbage collection as
a function of heap size on the SPEC JVM benchmarks. For
example, we find that the contiguous allocation of
copying collectors attains significant locality
benefits over free-list allocators. The reduced
collection costs of the generational algorithms
together with the locality benefit of contiguous
allocation motivates a copying {\em nursery\/} for
newly allocated objects. These benefits dominate the
overheads of generational collectors compared with
non-generational and no collection, disputing the myth
that `no garbage collection is good garbage
collection.' Performance is less sensitive to the
mature space collection algorithm in our benchmarks.
However, the locality and pointer mutation
characteristics for a given program occasionally prefer
copying or mark-sweep. This study is unique in its
breadth of garbage collection algorithms and its depth
of analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "generational; java; mark-sweep; reference counting;
semi-space",
}
@Article{Jin:2004:IPS,
author = "Wei Jin and Jeffrey S. Chase and Jasleen Kaur",
title = "Interposed proportional sharing for a storage service
utility",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "37--48",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005694",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper develops and evaluates new share-based
scheduling algorithms for differentiated service
quality in network services, such as network storage
servers. This form of resource control makes it
possible to share a server among multiple request flows
with probabilistic assurance that each flow receives a
specified minimum share of a server's capacity to serve
requests. This assurance is important for safe
outsourcing of services to shared utilities such as
Storage Service Providers. Our approach interposes
share-based request dispatching on the network path
between the server and its clients. Two new scheduling
algorithms are designed to run within an intermediary
(e.g., a network switch), where they enforce fair
sharing by throttling request flows and reordering
requests; these algorithms are adaptations of
Start-time Fair Queuing (SFQ) for servers with a
configurable degree of internal concurrency. A third
algorithm, Request Windows (RW), bounds the outstanding
requests for each flow independently; it is amenable to
a decentralized implementation, but may restrict
concurrency under light load. The analysis and
experimental results show that these new algorithms can
enforce shares effectively when the shares are not
saturated, and that they provide acceptable performance
isolation under saturation. Although the evaluation
uses a storage service as an example, interposed
request scheduling is non-intrusive and views the
server as a black box, so it is useful for complex
services with no internal support for differentiated
service quality.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "differentiated service; fair sharing; multiprocessor
scheduling; performance isolation; proportional
sharing; quality of service; storage services; utility
computing; weighted fair queuing",
}
@Article{Soule:2004:FCH,
author = "Augustin Soule and Kav{\'e} Salamatia and Nina Taft
and Richard Emilion and Konstantina Papagiannaki",
title = "Flow classification by histograms: or how to go on
safari in the {Internet}",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "49--60",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In order to control and manage highly aggregated
Internet traffic flows efficiently, we need to be able
to categorize flows into distinct classes and to be
knowledgeable about the different behavior of flows
belonging to these classes. In this paper we consider
the problem of classifying BGP level prefix flows into
a small set of homogeneous classes. We argue that using
the entire distributional properties of flows can have
significant benefits in terms of quality in the derived
classification. We propose a method based on modeling
flow histograms using Dirichlet Mixture Processes for
random distributions. We present an inference procedure
based on the Simulated Annealing Expectation
Maximization algorithm that estimates all the model
parameters as well as flow {\em membership
probabilities\/} --- the probability that a flow
belongs to any given class. One of our key
contributions is a new method for Internet flow
classification. We show that our method is powerful in
that it is capable of examining macroscopic flows while
simultaneously making fine distinctions between
different traffic classes. We demonstrate that our
scheme can address issues with flows being close to
class boundaries and the inherent dynamic behaviour of
Internet flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "flow classification; Internet traffic; parameter
estimation",
}
@Article{Lakhina:2004:SAN,
author = "Anukool Lakhina and Konstantina Papagiannaki and Mark
Crovella and Christophe Diot and Eric D. Kolaczyk and
Nina Taft",
title = "Structural analysis of network traffic flows",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "61--72",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network traffic arises from the superposition of
Origin-Destination (OD) flows. Hence, a thorough
understanding of OD flows is essential for modeling
network traffic, and for addressing a wide variety of
problems including traffic engineering, traffic matrix
estimation, capacity planning, forecasting and anomaly
detection. However, to date, OD flows have not been
closely studied, and there is very little known about
their properties. We present the first analysis of
complete sets of OD flow time-series, taken from two
different backbone networks (Abilene and
Sprint-Europe). Using Principal Component Analysis
(PCA), we find that the set of OD flows has small
intrinsic dimension. In fact, even in a network with
over a hundred OD flows, these flows can be accurately
modeled in time using a small number (10 or less) of
independent components or dimensions. We also show how
to use PCA to systematically decompose the structure of
OD flow time-series into three main constituents: common
periodic trends, short-lived bursts, and noise. We
provide insight into how the various constituents
contribute to the overall structure of OD flows and
explore the extent to which this decomposition varies
over time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network traffic analysis; principal component
analysis; traffic engineering",
}
@Article{Soule:2004:HIE,
author = "Augustin Soule and Antonio Nucci and Rene Cruz and
Emilio Leonardi and Nina Taft",
title = "How to identify and estimate the largest traffic
matrix elements in a dynamic environment",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "73--84",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005698",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we investigate a new idea for traffic
matrix estimation that makes the basic problem less
under-constrained, by deliberately changing the routing
to obtain additional measurements. Because all these
measurements are collected over disparate time
intervals, we need to establish models for each
Origin-Destination (OD) pair to capture the complex
behaviours of Internet traffic. We model each OD pair
with two components: the diurnal pattern and the
fluctuation process. We provide models that incorporate
the two components above, to estimate both the first
and second order moments of traffic matrices. We do
this for both stationary and cyclo-stationary traffic
scenarios. We formalize the problem of estimating the
second order moment in a way that is completely
independent from the first order moment. Moreover, we
can estimate the second order moment without needing
any routing changes (i.e., without explicit changes to
IGP link weights). We prove, for the first time, that
such a result holds for any realistic topology under
the assumption of {\em minimum cost routing\/} and {\em
strictly positive link weights}. We highlight how the
second order moment helps the identification of the top
largest OD flows carrying the most significant fraction
of network traffic. We then propose a refined
methodology consisting of using our variance estimator
(without routing changes) to identify the top largest
flows, and estimate only these flows. The benefit of
this method is that it dramatically reduces the number
of routing changes needed. We validate the
effectiveness of our methodology and the intuitions
behind it by using real aggregated sampled netflow data
collected from a commercial Tier-1 backbone.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network tomography; traffic matrix estimation",
}
@Article{Duffield:2004:FSU,
author = "Nick Duffield and Carsten Lund and Mikkel Thorup",
title = "Flow sampling under hard resource constraints",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "85--96",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005699",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many network management applications use as their data
traffic volumes differentiated by attributes such as IP
address or port number. IP flow records are commonly
collected for this purpose: these enable determination
of fine-grained usage of network resources. However,
the increasingly large volumes of flow statistics incur
concomitant costs in the resources of the measurement
infrastructure. This motivates sampling of flow
records. This paper addresses sampling strategy for
flow records. Recent work has shown that non-uniform
sampling is necessary in order to control estimation
variance arising from the observed heavy-tailed
distribution of flow lengths. However, while this
approach controls estimator variance, it does not place
hard limits on the number of flows sampled. Such limits
are often required during arbitrary downstream
sampling, resampling and aggregation operations
employed in analysis of the data. This paper proposes a
correlated sampling strategy that is able to select an
arbitrarily small number of the `best' representatives
of a set of flows. We show that usage estimates arising
from such selection are unbiased, and show how to
estimate their variance, both offline for modeling
purposes, and online during the sampling itself. The
selection algorithm can be implemented in a queue-like
data structure in which memory usage is uniformly
bounded during measurement. Finally, we compare the
complexity and performance of our scheme with other
potential approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "IP flows; sampling; variance reduction",
}
@Article{Aalto:2004:TLP,
author = "Samuli Aalto and Urtzi Ayesta and Eeva
Nyberg-Oksanen",
title = "Two-level processor-sharing scheduling disciplines:
mean delay analysis",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "97--105",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005701",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Inspired by several recent papers that focus on
scheduling disciplines for network flows, we present a
mean delay analysis of Multilevel Processor Sharing
(MLPS) scheduling disciplines in the context of M/G/1
queues. Such disciplines have been proposed to model
the effect of the differentiation between short and
long TCP flows in the Internet. Under MLPS, jobs are
classified into classes depending on their attained
service. We consider scheduling disciplines where jobs
within the same class are served either with Processor
Sharing (PS) or Foreground Background (FB) policy, and
the class that contains jobs with the smallest attained
service is served first. It is known that the FB policy
minimizes (maximizes) the mean delay when the hazard
rate of the job size distribution is decreasing
(increasing). Our analysis, based on pathwise and
meanwise arguments of the unfinished truncated work,
shows that Two-Level Processor Sharing (TLPS)
disciplines, e.g., FB+PS and PS+PS, are better than PS
scheduling when the hazard rate of the job size
distribution is decreasing. If the hazard rate is
increasing and bounded, we show that PS outperforms
PS+PS and FB+PS. We further extend our analysis to
study local optimality within a level of an MLPS
scheduling discipline.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "FB; LAS; M/G/1; mean delay; MLPS; PS; scheduling;
unfinished truncated work",
}
@Article{Rai:2004:PAB,
author = "Idris A. Rai and Guillaume Urvoy-Keller and Mary K.
Vernon and Ernst W. Biersack",
title = "Performance analysis of {LAS}-based scheduling
disciplines in a packet switched network",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "106--117",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005702",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Least Attained Service (LAS) scheduling policy,
when used for scheduling packets over the bottleneck
link of an Internet path, can greatly reduce the
average flow time for short flows while not
significantly increasing the average flow time for the
long flows that share the same bottleneck. No
modification of the packet headers is required to
implement the simple LAS policy. However, previous work
has also shown that a drawback of the LAS scheduler is
that, when link utilization is greater than 70\%, long
flows experience large jitter in their packet transfer
times as compared to the conventional
First-Come-First-Serve (FCFS) link scheduling. This
paper proposes and evaluates new differentiated LAS
scheduling policies that reduce the jitter for long
flows that are identified as `priority' flows. To
evaluate the new policies, we develop analytic models
to estimate average flow transfer time as a function of
flow size, and average packet transmission time as a
function of position in the flow, for the
single-bottleneck `dumbbell topology' used in many ns
simulation studies. Models are developed for FCFS
scheduling, LAS scheduling, and each of the new
differentiated LAS scheduling policies at the
bottleneck link. Over a wide range of configurations,
the analytic estimates agree very closely with the ns
estimates. Thus, the analytic models can be used
instead of simulation for comparing the policies with
respect to mean flow transfer time (as a function of
flow size) and mean packet transfer time. Furthermore,
an initial discrepancy between the analytic and
simulation estimates revealed errors in the parameter
values that are often specified in the widely used ns
Web workload generator. We develop an improved Web
workload specification, which is used to estimate the
packet jitter for long flows (more accurately than with
previous simulation workloads). Results for the
scheduling policies show that a particular policy,
LAS-log, greatly improves the mean flow transfer time
for priority long flows while providing performance
similar to LAS for the ordinary flows. Simulations show
that the LAS-log policy also greatly reduces the jitter
in packet delivery times for the priority flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "FCFS and LAS models; LAS-based scheduling and models;
models validation; scheduling; service differentiation;
simulations",
}
@Article{Key:2004:ELP,
author = "Peter Key and Laurent Massouli{\'e} and Bing Wang",
title = "Emulating low-priority transport at the application
layer: a background transfer service",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "118--129",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005703",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Low priority data transfer across the wide area is
useful in several contexts, for example for the
dissemination of large files such as OS updates,
content distribution or prefetching. Although the
design of such a service is reasonably easy when the
underlying network supports service differentiation, it
becomes more challenging without such network support.
We describe an application level approach to designing
a low priority service --- one that is `lower than
best-effort' in the context of the current Internet. We
require neither network support nor changes to TCP.
Instead, we use a receive window control to limit the
transfer rate of the application, and the optimal rate
is determined by detecting a change-point. We motivate
this joint control-estimation problem by considering a
fluid-based optimisation framework, and describe
practical solutions, based on stochastic approximation
and binary search techniques. Simulation results
demonstrate the effectiveness of the approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "application reaction; background transfer; binary
search; low priority; stochastic approximation",
}
@Article{Raz:2004:RAQ,
author = "David Raz and Hanoch Levy and Benjamin Avi-Itzhak",
title = "A resource-allocation queueing fairness measure",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "130--141",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005704",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fairness is a major issue in the operation of queues,
perhaps it is the reason why queues were formed in the
first place. Recent studies show that the fairness of a
queueing system is important to customers not less than
the actual delay they experience. Despite this
observation little research has been conducted to study
fairness in queues, and no commonly agreed upon measure
of queue fairness exists. Two recent research
exceptions are Avi-Itzhak and Levy [1], where a
fairness measure is proposed, and Wierman and
Harchol-Balter [18] (this conference, 2003), where a
{\em criterion\/} is proposed for classifying service
policies as fair or unfair; the criterion focuses on
customer service requirement and deals with fairness
with respect to service times. In this work we
recognize that the inherent behavior of a queueing
system is governed by two major factors: Job {\em
seniority\/} (arrival times) and job {\em service
requirement\/} (service time). Thus, it is desired that
a queueing fairness measure would account for both. To
this end we propose a Resource Allocation Queueing
Fairness Measure, (RAQFM), that accounts for both
relative job seniority and relative service time. The
measure allows accounting for individual job
discrimination as well as system unfairness. The system
measure forms a full scale that can be used to evaluate
the level of unfairness under various queueing
disciplines. We present several basic properties of the
measure. We derive the individual measure as well as
the system measure for an M/M/1 queue under five
fundamental service policies: Processor Sharing (PS),
First Come First Served (FCFS), Non-Preemptive Last
Come First Served (NP-LCFS), Preemptive Last Come First
Served (P-LCFS), and Random Order of Service (ROS). The
results of RAQFM are then compared to those of Wierman
and Harchol-Balter [18], and the quite intriguing
observed differences are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fairness; FCFS; job scheduling; M/M/1; processor
sharing; PS; queue disciplines; resource allocation;
unfairness",
}
@Article{Paxson:2004:MA,
author = "Vern Paxson",
title = "Measuring adversaries",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "142--142",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005688",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many concepts and techniques developed for general
Internet measurement have counterparts in the domain of
detecting and analyzing network attacks. The task is
greatly complicated, however, by the fact that the
object of study is {\em adversarial\/}: attackers do
not wish to be `measured' and will take steps to thwart
observation. We look at the far-ranging consequences of
this different measurement environment: the analysis
difficulties (some of them fundamental) that arise due to subtle
ambiguities in the true semantics of observed traffic;
new notions of `active measurement'; the highly
challenging task of rapidly characterizing
Internet-scale phenomena such as global worm pandemics;
the need for detailed application-level analysis and
related policy and legal difficulties; attacks that
target passive analysis tools; and the inherent `arms
race' nature of the undertaking.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2004:FSF,
author = "Hwangnam Kim and Jennifer C. Hou",
title = "A fast simulation framework for {IEEE 802.11}-operated
wireless {LANs}",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "143--154",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005706",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we develop a fast simulation framework
for IEEE 802.11-operated wireless LANs (WLANs), in
which a large number of packets are abstracted as a
single fluid chunk, and their behaviors are
approximated with analytic fluid models and figured
into the simulation. We first derive the analytical
model that characterizes data transmission activities
in IEEE 802.11-operated WLANs with/without the RTS/CTS
mechanism. All the control overhead incurred in the
physical and MAC layers, as well as system parameters
specified in IEEE 802.11 [12] are faithfully figured
in. We validate the model with simulation in cases in
which the network is and is not saturated. We then
implement, with the use of the time stepping technique
[21], the fast simulation framework for WLANs in {\em
ns-2\/} [2], and conduct a comprehensive simulation
study to evaluate the framework in terms of speed-up
and errors incurred under a variety of network
configurations. The simulation results indicate that
the proposed framework is indeed effective in
simulating IEEE 802.11-operated WLANs. It achieves as
much as two orders of magnitude improvement in terms of
execution time as compared to packet-level simulation.
The performance improvement is more pronounced when the
number of wireless nodes, the number of applications
running on each wireless node, or the number of WLANs
increases. The relative error, on the other hand, falls
within 2\% in all cases, as long as the value of the
time step is appropriately determined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fast simulation; IEEE 802.11; throughput analysis;
wireless LANs",
}
@Article{Hao:2004:ARF,
author = "Fang Hao and Murali Kodialam and T. V. Lakshman",
title = "{ACCEL-RATE}: a faster mechanism for memory efficient
per-flow traffic estimation",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "155--166",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005707",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Per-flow network traffic measurement is an important
component of network traffic management, network
performance assessment, and detection of anomalous
network events such as incipient DoS attacks. In [1],
the authors developed a mechanism called RATE where the
focus was on developing a memory efficient scheme for
estimating per-flow traffic rates to a specified level
of accuracy. The time taken by RATE to estimate the
per-flow rates is a function of the specified
estimation accuracy and this time is acceptable for
several applications. However some applications, such
as quickly detecting worm related activity or the
tracking of transient traffic, demand faster estimation
times. The main contribution of this paper is a new
scheme called ACCEL-RATE that, for a specified level of
accuracy, can achieve orders of magnitude decrease in
per-flow rate estimation times. It achieves this by
using a hashing scheme to split the incoming traffic
into several sub-streams, estimating the per-flow
traffic rates in each of the sub-streams and then
relating it back to the original per-flow traffic
rates. We show both theoretically and experimentally
that the estimation time of ACCEL-RATE is at least one
to two orders of magnitude lower than RATE without any
significant increase in the memory size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burtscher:2004:VFE,
author = "Martin Burtscher",
title = "{VPC3}: a fast and effective trace-compression
algorithm",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "167--176",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005708",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Trace files are widely used in research and academia
to study the behavior of programs. They are simple to
process and guarantee repeatability. Unfortunately,
they tend to be very large. This paper describes {\em
vpc3}, a fundamentally new approach to compressing
program traces. {\em Vpc3\/} employs value predictors
to bring out and amplify patterns in the traces so that
conventional compressors can compress them more
effectively. In fact, our approach not only results in
much higher compression rates but also provides faster
compression and decompression. For example, compared to
{\em bzip2}, {\em vpc3\/}'s geometric mean compression
rate on SPECcpu2000 store address traces is 18.4 times
higher, compression is ten times faster, and
decompression is three times faster.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "predictor-based compression; trace compression; trace
files",
}
@Article{Kumar:2004:DSA,
author = "Abhishek Kumar and Minho Sung and Jun (Jim) Xu and Jia
Wang",
title = "Data streaming algorithms for efficient and accurate
estimation of flow size distribution",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "177--188",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005709",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Knowing the distribution of the sizes of traffic flows
passing through a network link helps a network operator
to characterize network resource usage, infer traffic
demands, detect traffic anomalies, and accommodate new
traffic demands through better traffic engineering.
Previous work on estimating the flow size distribution
has been focused on making inferences from sampled
network traffic. Its accuracy is limited by the
(typically) low sampling rate required to make the
sampling operation affordable. In this paper we present
a novel data streaming algorithm to provide much more
accurate estimates of flow distribution, using a `lossy
data structure' which consists of an array of counters
fitted well into SRAM. For each incoming packet, our
algorithm only needs to increment one underlying
counter, making the algorithm fast enough even for 40
Gbps (OC-768) links. The data structure is lossy in the
sense that sizes of multiple flows may collide into the
same counter. Our algorithm uses Bayesian statistical
methods such as Expectation Maximization to infer the
most likely flow size distribution that results in the
observed counter values after collision. Evaluations of
this algorithm on large Internet traces obtained from
several sources (including a tier-1 ISP) demonstrate
that it has very high measurement accuracy (within
2\%). Our algorithm not only dramatically improves the
accuracy of flow distribution measurement, but also
contributes to the field of data streaming by
formalizing an existing methodology and applying it to
the context of estimating the flow-distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; network measurement; statistical
inference; traffic analysis",
}
@Article{Ma:2004:GTA,
author = "Richard T. B. Ma and Sam C. M. Lee and John C. S. Lui
and David K. Y. Yau",
title = "A game theoretic approach to provide incentive and
service differentiation in {P2P} networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "189--198",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005711",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditional peer-to-peer (P2P) networks do not provide
service differentiation and incentive for users.
Consequently, users can obtain services without
themselves contributing any information or service to a
P2P community. This leads to the `free-riding' and
`tragedy of the commons' problems, in which the
majority of information requests are directed towards a
small number of P2P nodes willing to share their
resources. The objective of this work is to enable
service differentiation in a P2P network based on the
amount of services each node has provided to its
community, thereby encouraging all network nodes to
share resources. We first introduce a resource
distribution mechanism between all information sharing
nodes. The mechanism is driven by a distributed
algorithm which has linear time complexity and
guarantees Pareto-optimal resource allocation. Besides
giving incentive, the mechanism distributes resources
in a way that increases the aggregate utility of the
whole network. Second, we model the whole resource
request and distribution process as a competition game
between the competing nodes. We show that this game has
a Nash equilibrium and is collusion-proof. To realize
the game, we propose a protocol in which all competing
nodes interact with the information providing node to
reach Nash equilibrium in a dynamic and efficient
manner. Experimental results are reported to illustrate
that the protocol achieves its service differentiation
objective and can induce productive information sharing
by rational network nodes. Finally, we show that our
protocol can properly adapt to different node arrival
and departure events, and to different forms of network
congestion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lam:2004:FRS,
author = "Simon S. Lam and Huaiyu Liu",
title = "Failure recovery for structured {P2P} networks:
protocol design and performance evaluation",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "199--210",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005712",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurement studies indicate a high rate of node
dynamics in p2p systems. In this paper, we address the
question of how high a rate of node dynamics can be
supported by {\em structured\/} p2p networks. We
confine our study to the hypercube routing scheme used
by several structured p2p systems. To improve system
robustness and facilitate failure recovery, we
introduce the property of $K$-{\em consistency}, $ K
\geq 1$, which generalizes consistency defined
previously. (Consistency guarantees connectivity from
any node to any other node.) We design and evaluate a
failure recovery protocol based upon local information
for $K$-consistent networks. The failure recovery
protocol is then integrated with a join protocol that
has been proved to construct $K$-consistent neighbor
tables for concurrent joins. The integrated protocols
were evaluated by a set of simulation experiments in
which nodes joined a 2000-node network and nodes (both
old and new) were randomly selected to fail
concurrently over 10,000 seconds of simulated time. In
each such `churn' experiment, we took a `snapshot' of
neighbor tables in the network once every 50 seconds
and evaluated connectivity and consistency measures
over time as a function of the churn rate, timeout
value in failure recovery, and $K$. Storage and
communication overheads were also evaluated. We found
our protocols to be effective, efficient, and stable
for an average node lifetime as low as 8.3 minutes (the
median lifetime measured for Napster and Gnutella was
60 minutes [10]).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "failure recovery; hypercube routing; k-consistency;
peer-to-peer networks; sustainable churn rate",
}
@Article{Wang:2004:ZBP,
author = "Xiaoming Wang and Yueping Zhang and Xiafeng Li and
Dmitri Loguinov",
title = "On zone-balancing of peer-to-peer networks: analysis
of random node join",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "211--222",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005713",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Balancing peer-to-peer graphs, including zone-size
distributions, has recently become an important topic
of peer-to-peer (P2P) research [1], [2], [6], [19],
[31], [36]. To bring analytical understanding into the
various peer-join mechanisms, we study how
zone-balancing decisions made during the initial
sampling of the peer space affect the resulting zone
sizes and derive several asymptotic results for the
maximum and minimum zone sizes that hold with high
probability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "balls-into-bins; load-balancing; modeling;
peer-to-peer",
}
@Article{Kansal:2004:PAT,
author = "Aman Kansal and Dunny Potter and Mani B. Srivastava",
title = "Performance aware tasking for environmentally powered
sensor networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "223--234",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005714",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of environmental energy is now emerging as a
feasible energy source for embedded and wireless
computing systems such as sensor networks where manual
recharging or replacement of batteries is not
practical. However, energy supply from environmental
sources is highly variable with time. Further, for a
distributed system, the energy available at its various
locations will be different. These variations strongly
influence the way in which environmental energy is
used. We present a harvesting theory for determining
performance in such systems. First we present a model
for characterizing environmental sources. Second, we
state and prove two harvesting theorems that help
determine the sustainable performance level from a
particular source. This theory leads to practical
techniques for scheduling processes in energy
harvesting systems. Third, we present our
implementation of a real embedded system that runs on
solar energy and uses our harvesting techniques. The
system adjusts its performance level in response to
available resources. Fourth, we propose a localized
algorithm for increasing the performance of a
distributed system by adapting the process scheduling
to the spatio-temporal characteristics of the
environmental energy in the distributed system. While
our theoretical intuition is based on certain
abstractions, all the scheduling methods we present are
motivated solely from the experimental behavior and
resource constraints of practical sensor networking
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy harvesting; performance guarantees; process
scheduling",
}
@Article{Bonald:2004:PBI,
author = "Thomas Bonald and Alexandre Prouti{\`e}re",
title = "On performance bounds for the integration of elastic
and adaptive streaming flows",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "235--245",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005716",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a network model where bandwidth is fairly
shared by a dynamic number of elastic and adaptive
streaming flows. Elastic flows correspond to data
transfers while adaptive streaming flows correspond to
audio/video applications with variable rate codecs. In
particular, the former are characterized by a fixed
size (in bits) while the latter are characterized by a
fixed duration. This flow-level model turns out to be
intractable in general. In this paper, we give
performance bounds for both elastic and streaming
traffic by means of sample-path arguments. These bounds
present the practical interest of being insensitive to
traffic characteristics like the distributions of
elastic flow size and streaming flow duration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adaptive streaming traffic; elastic traffic;
flow-level analysis; insensitive bounds; multi-service
network",
}
@Article{Deb:2004:RBV,
author = "Supratim Deb and R. Srikant",
title = "Rate-based versus queue-based models of congestion
control",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "246--257",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005717",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mathematical models of congestion control capture the
congestion indication mechanism at the router in two
different ways: rate-based models, where the
queue-length at the router does not explicitly appear
in the model, and queue-based models, where the queue
length at the router is explicitly a part of the model.
Even though most congestion indication mechanisms use
the queue length to compute the packet marking or
dropping probability to indicate congestion, we argue
that, depending upon the choice of the parameters of
the AQM scheme, one would obtain a rate-based model or
a rate-and-queue-based model as the deterministic limit
of a stochastic system with a large number of users. We
also consider the impact of implementing AQM schemes in
the real queue or a virtual queue. If an AQM scheme is
implemented in a real queue, we show that, to ensure
that the queuing delays are negligible compared to
RTTs, one is forced to choose the parameters of an AQM
scheme in a manner which yields a rate-based
deterministic model. On the other hand, if the AQM
scheme is implemented in a virtual queue, small-queue
operation is achieved independent of the choice of the
parameters, thus showing a robustness property of
virtual queue-based schemes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "AQM parameters; congestion control; virtual queue",
}
@Article{Chandrayana:2004:UCC,
author = "Kartikeya Chandrayana and Shivkumar Kalyanaraman",
title = "Uncooperative congestion control",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "258--269",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005718",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditionally uncooperative rate control schemes have
implied open loop protocols such as UDP, CBR, etc. In
this paper we show that closed loop uncooperative rate
control schemes also exist and that the current AQM
proposals cannot efficiently control their
mis-behavior. Moreover, these proposals require that
AQM be installed at all routers in the Internet, which
is not only expensive but also requires a significant
network upgrade. In this paper we show that management of
uncooperative flows need not be coupled with AQM design
but can be viewed as an edge-based policing question. In
this paper we propose an analytical model for managing
uncooperative flows in the Internet by re-mapping their
utility function to a target range of utility
functions. This mapping can be achieved by
transparently manipulating congestion penalties
conveyed to the uncooperative users. The most
interesting aspect of this research is that this task
can be performed at the edge of the network with little
state information about uncooperative flows. The
proposed solution is independent of the buffer
management algorithm deployed on the network. As such
it works with Drop-Tail queues as well as any AQM
scheme. We have analyzed the framework and evaluated it
on various single and multi-bottleneck topologies with
both Drop-Tail and RED. Our results show that the
framework is robust and works well even in presence of
background traffic and reverse path congestion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; malicious behavior; optimization;
re-marking; selfish flows; uncooperative; utility
functions",
}
@Article{Applegate:2004:CNF,
author = "David Applegate and Lee Breslau and Edith Cohen",
title = "Coping with network failures: routing strategies for
optimal demand oblivious restoration",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "270--281",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005719",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Link and node failures in IP networks pose a challenge
for network control algorithms. Routing restoration,
which computes new routes that avoid failed links,
involves fundamental tradeoffs between efficient use of
network resources, complexity of the restoration
strategy and disruption to network traffic. In order to
achieve a balance between these goals, obtaining
routings that provide good performance guarantees under
failures is desirable. In this paper, building on
previous work that provided performance guarantees
under uncertain (and potentially unknown) traffic
demands, we develop algorithms for computing optimal
restoration paths and a methodology for evaluating the
performance guarantees of routing under failures. We
then study the performance of route restoration on a
diverse collection of ISP networks. Our evaluation uses
a competitive analysis type framework, where
performance of routing with restoration paths under
failures is compared to the best possible performance
on the failed network. We conclude that with careful
selection of restoration paths one can obtain
restoration strategies that retain nearly optimal
performance on the failed network while minimizing
disruptions to traffic flows that did not traverse the
failed parts of the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "demand-oblivious routing; restoration; routing",
}
@Article{Sevcik:2004:SSA,
author = "Kenneth C. Sevcik",
title = "Some systems, applications and models {I} have known",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "282--282",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005689",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Being named recipient of the 2004 ACM Sigmetrics
Achievement Award has done several things to me. It
brought me surprise that I would be singled out from
the many people who have made significant and sustained
contributions to the field of performance evaluation.
It also brought me deep appreciation for all the
students and colleagues with whom I have worked and
come to know as friends over the years. Finally, it has
caused me to ponder and reminisce about many of the
research projects and consulting studies in which I
have participated. In this talk, I will describe
various systems I have used and studied, various
applications of interest, and various models that I,
and others, have used to try to gain insights into the
performance of systems. Some lessons of possible future
relevance that emerge from this retrospective look at a
wide variety of projects are the following:
Exact Answers Are Overrated --- While exact solutions
of mathematical models are intellectually satisfying,
they are often not needed in practice.
Analytic Models Have a Role --- Analytic models can be
used to obtain quick and inexpensive answers to
performance questions in many situations where neither
simulation nor experimentation are
feasible.
Assumptions Matter --- Subtle changes to the
assumptions that underlie an analytic model can
substantially alter the conclusions reached based on
the model.
After considering all the methods of analysis,
simulation and experimentation, my recommendation for
the very best means to attain substantially improved
computer system performance is: Wait thirty years!",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tinnakornsrisuphap:2004:CQF,
author = "Peerapol Tinnakornsrisuphap and Richard J. La",
title = "Characterization of queue fluctuations in
probabilistic {AQM} mechanisms",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "283--294",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005721",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop a framework for studying the interaction of
a probabilistic active queue management (AQM) algorithm
with a generic end-user congestion-control mechanism.
We show that as the number of flows in the network
increases, the queue dynamics can be accurately
approximated by a simple deterministic process. In
addition, we investigate the sources of queue
fluctuations in this setup. We characterize two
distinct sources of queue fluctuations; one is the
deterministic oscillations which can be captured
through the aforementioned deterministic process. The
other source is the random fluctuations introduced by
the probabilistic nature of the marking schemes. We
discuss the relationship between these two types of
fluctuations and provide insights into how to control
them. Concrete examples in this framework are given for
several popular algorithms such as Random Early
Detection, Random Early Marking and Transmission
Control Protocol.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active queue management; central limit theorem; queue
fluctuations",
}
@Article{Vanichpun:2004:OCU,
author = "Sarut Vanichpun and Armand M. Makowski",
title = "The output of a cache under the independent reference
model: where did the locality of reference go?",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "295--306",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005722",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a cache operating under a demand-driven
replacement policy when document requests are modeled
according to the Independent Reference Model (IRM). We
characterize the popularity pmf of the stream of misses
from the cache, the so-called output of the cache, for
a large class of demand-driven cache replacement
policies. We measure the strength of locality of reference
in a stream of requests through the skewness of its
popularity distribution. Using the notion of
majorization to capture this degree of skewness, we
show that for the policy $ A_0 $ and the random policy,
the output always has less locality of reference than
the input. However, we show by counterexamples that
this is not always the case under the LRU and CLIMB
policies when the input is selected according to a
Zipf-like pmf. In that case, conjectures are offered
(and supported by simulations) as to when LRU or CLIMB
caching indeed reduces locality of reference.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "locality of reference; majorization; output of a
cache; popularity",
}
@Article{Teixeira:2004:DHP,
author = "Renata Teixeira and Aman Shaikh and Tim Griffin and
Jennifer Rexford",
title = "Dynamics of hot-potato routing in {IP} networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "307--319",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005723",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite the architectural separation between
intradomain and interdomain routing in the Internet,
intradomain protocols do influence the path-selection
process in the Border Gateway Protocol (BGP). When
choosing between multiple equally-good BGP routes, a
router selects the one with the {\em closest\/} egress
point, based on the intradomain path cost. Under such
{\em hot-potato\/} routing, an intradomain event can
trigger BGP routing changes. To characterize the
influence of hot-potato routing, we conduct controlled
experiments with a commercial router. Then, we propose
a technique for associating BGP routing changes with
events visible in the intradomain protocol, and apply
our algorithm to AT\&T's backbone network. We show that
(i) hot-potato routing can be a significant source of
BGP updates, (ii) BGP updates can lag {\em 60\/}
seconds or more behind the intradomain event, (iii) the
number of BGP path changes triggered by hot-potato
routing has a nearly uniform distribution across
destination prefixes, and (iv) the fraction of BGP
messages triggered by intradomain changes varies
significantly across time and router locations. We show
that hot-potato routing changes lead to longer delays
in forwarding-plane convergence, shifts in the flow of
traffic to neighboring domains, extra
externally-visible BGP update messages, and
inaccuracies in Internet performance measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; convergence; hot-potato routing; OSPF",
}
@Article{Agarwal:2004:IBD,
author = "Sharad Agarwal and Chen-Nee Chuah and Supratik
Bhattacharyya and Christophe Diot",
title = "The impact of {BGP} dynamics on intra-domain traffic",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "319--330",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005724",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent work in network traffic matrix estimation has
focused on generating router-to-router or PoP-to-PoP
(Point-of-Presence) traffic matrices within an ISP
backbone from network link load data. However, these
estimation techniques have not considered the impact of
inter-domain routing changes in BGP (Border Gateway
Protocol). BGP routing changes have the potential to
introduce significant errors in estimated traffic
matrices by causing traffic shifts between egress
routers or PoPs within a single backbone network. We
present a methodology to correlate BGP routing table
changes with packet traces in order to analyze how BGP
dynamics affect traffic fan-out within a large `tier-1'
network. Despite an average of 133 BGP routing updates
per minute, we find that BGP routing changes do not
cause more than 0.03\% of ingress traffic to shift
between egress PoPs. This limited impact is mostly due
to the relative stability of network prefixes that
receive the majority of traffic --- 0.05\% of BGP
routing table changes affect intra-domain routes for
prefixes that carry 80\% of the traffic. Thus our work
validates an important assumption underlying existing
techniques for traffic matrix estimation in large IP
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; traffic analysis; traffic engineering; traffic
matrix",
}
@Article{Feamster:2004:MBR,
author = "Nick Feamster and Jared Winick and Jennifer Rexford",
title = "A model of {BGP} routing for network engineering",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "331--342",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005726",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of IP networks depends on a wide
variety of dynamic conditions. Traffic shifts,
equipment failures, planned maintenance, and topology
changes in other parts of the Internet can all degrade
performance. To maintain good performance, network
operators must continually reconfigure the routing
protocols. Operators configure BGP to control how
traffic flows to neighboring Autonomous Systems (ASes),
as well as how traffic traverses their networks.
However, because BGP route selection is distributed,
indirectly controlled by configurable policies, and
influenced by complex interactions with intradomain
routing protocols, operators cannot predict how a
particular BGP configuration would behave in practice.
To avoid inadvertently degrading network performance,
operators need to evaluate the effects of configuration
changes {\em before deploying them on a live network}.
We propose an algorithm that computes the outcome of
the BGP route selection process for each router in a
{\em single\/} AS, given only a static snapshot of the
network state, without simulating the complex details
of BGP message passing. We describe a BGP emulator
based on this algorithm; the emulator exploits the
unique characteristics of routing data to reduce
computational overhead. Using data from a large ISP, we
show that the emulator correctly computes BGP routing
decisions and has a running time that is acceptable for
many tasks, such as traffic engineering and capacity
planning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; modeling; routing; traffic engineering",
}
@Article{Baccelli:2004:MFA,
author = "Fran{\c{c}}ois Baccelli and Augustin Chaintreau and
Danny De Vleeschauwer and David McDonald",
title = "A mean-field analysis of short lived interacting {TCP}
flows",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "343--354",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005727",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider a set of HTTP flows using
TCP over a common drop-tail link to download files.
After each download, a flow waits for a random think
time before requesting the download of another file,
whose size is also random. When a flow is active its
throughput is increasing with time according to the
additive increase rule, but if it suffers losses
created when the total transmission rate of the flows
exceeds the link rate, its transmission rate is
decreased. The throughput obtained by a flow, and the
consecutive time to download one file are then given as
the consequence of the interaction of all the flows
through their total transmission rate and the link's
behavior. We study the mean-field model obtained by
letting the number of flows go to infinity. This
mean-field limit may have two stable regimes: one
without congestion in the link, in which the density of
transmission rate can be explicitly described, the
other one with periodic congestion epochs, where the
inter-congestion time can be characterized as the
solution of a fixed point equation, that we compute
numerically, leading to a density of transmission rate
given as the solution of a Fredholm equation. It is
shown that for certain values of the parameters (more
precisely when the link capacity per user is not
significantly larger than the load per user), each of
these two stable regimes can be reached depending on
the initial condition. This phenomenon can be seen as
an analogue of turbulence in fluid dynamics: for some
initial conditions, the transfers progress in a fluid
and interaction-less way; for others, the connections
interact and slow down because of the resulting
fluctuations, which in turn perpetuates interaction
forever, in spite of the fact that the load per user is
less than the capacity per user. We prove that this
phenomenon is present in the Tahoe case and both the
numerical method that we develop and simulations
suggest that it is present in the Reno case too. It
translates into a bi-stability phenomenon for the
finite population model within this range of
parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "HTTP connections; mean-field model",
}
@Article{Hohn:2004:BRP,
author = "N. Hohn and D. Veitch and K. Papagiannaki and C.
Diot",
title = "Bridging router performance and queuing theory",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "355--366",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005728",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper provides an authoritative knowledge of
through-router packet delays and therefore a better
understanding of data network performance. Thanks to a
unique experimental setup, we capture {\em all\/}
packets crossing a router for 13 hours and present
detailed statistics of their delays. These measurements
allow us to build the following physical model for
router performance: each packet experiences a minimum
router processing time before entering a fluid output
queue. Although simple, this model reproduces the
router behaviour with excellent accuracy and avoids two
common pitfalls. First we show that in-router packet
processing time accounts for a significant portion of
the overall packet delay and should not be neglected.
Second we point out that one should fully understand
both link and physical layer characteristics to use the
appropriate bandwidth value. Focusing directly on
router performance, we provide insights into system
busy periods and show precisely how queues build up
inside a router. We explain why current practices for
inferring delays based on average utilization have
fundamental problems, and propose an alternative
solution to directly report router delay information
based on busy period statistics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet delay analysis; router model",
}
@Article{Bonald:2004:ILB,
author = "T. Bonald and M. Jonckheere and A. Prouti{\'e}re",
title = "Insensitive load balancing",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "367--377",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005729",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A large variety of communication systems, including
telephone and data networks, can be represented by
so-called Whittle networks. The stationary distribution
of these networks is insensitive, depending on the
service requirements at each node through their mean
only. These models are of considerable practical
interest as derived engineering rules are robust to the
evolution of traffic characteristics. In this paper we
relax the usual assumption of static routing and
address the issue of dynamic load balancing.
Specifically, we identify the class of load balancing
policies which preserve insensitivity and characterize
optimal strategies in some specific cases. Analytical
results are illustrated numerically on a number of toy
network examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "insensitivity; load balancing; whittle networks",
}
@Article{Bonald:2004:WDP,
author = "T. Bonald and S. Borst and N. Hegde and A.
Prouti{\'e}re",
title = "Wireless data performance in multi-cell scenarios",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "378--380",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005730",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of wireless data systems has been
extensively studied in the context of a single base
station. In the present paper we investigate the
flow-level performance in networks with multiple base
stations. We specifically examine the complex, dynamic
interaction of the number of active flows in the
various cells introduced by the strong impact of
interference between neighboring base stations. For the
downlink data transmissions that we consider, lower
service rates caused by increased interference from
neighboring base stations result in longer delays and
thus a higher number of active flows. This in turn
results in a longer duration of interference on
surrounding base stations, causing a strong correlation
between the activity states of the base stations. Such
a system can be modelled as a network of multi-class
processor-sharing queues, where the service rates for
the various classes at each queue vary over time as
governed by the activity state of the other queues. The
complex interaction between the various queues renders
an exact analysis intractable in general. A simplified
network with only one class per queue reduces to a
coupled-processors model, for which there are few
results, even in the case of two queues. We thus derive
bounds and approximations for key performance metrics
like the number of active flows, transfer delays, and
flow throughputs in the various cells. Importantly,
these bounds and approximations are insensitive,
yielding simple expressions that render the detailed
statistical characteristics of the system largely
irrelevant.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "elastic traffic; fluid regime; insensitivity;
multi-class processor-sharing; quasi-stationary regime;
stability; time-varying service; wireless data
networks",
}
@Article{Kapoor:2004:CSA,
author = "Rohit Kapoor and Ling-Jyh Chen and Alok Nandan and
Mario Gerla and M. Y. Sanadidi",
title = "{CapProbe}: a simple and accurate capacity estimation
technique for wired and wireless environments",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "390--391",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005732",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of estimating the capacity of an Internet
path is one of fundamental importance. Due to the
multitude of potential applications, a large number of
solutions have been proposed and evaluated. The
proposed solutions so far have been successful in
partially addressing the problem, but have suffered
from being slow, obtrusive or inaccurate. In this work,
we evaluate CapProbe, a low-cost and accurate
end-to-end capacity estimation scheme that relies on
packet dispersion techniques as well as end-to-end
delays. The key observation that enabled the
development of CapProbe is that both compression and
expansion of packet pair dispersion are the result of
queuing due to cross-traffic. By filtering out queuing
effects from packet pair samples, CapProbe is able to
estimate capacity accurately in most environments, with
minimal processing and probing traffic overhead. In
fact, the storage and processing requirements of
CapProbe are orders of magnitude smaller than most of
the previously proposed schemes. We tested CapProbe
through simulation, Internet, Internet2 and wireless
experiments. We found that CapProbe's error percentage in
capacity estimation was within 10\% in almost all
cases, and within 5\% in most cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "capacity estimation; delay; dispersion; packet pair",
}
@Article{Sommers:2004:HFL,
author = "Joel Sommers and Hyungsuk Kim and Paul Barford",
title = "{Harpoon}: a flow-level traffic generator for router
and network tests",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "392--392",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005733",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe Harpoon, a new application-independent
tool for generating representative packet traffic at
the {\em IP flow level}. Harpoon is a configurable tool
for creating TCP and UDP packet flows that have the
same byte, packet, temporal, and spatial
characteristics as measured at routers in live
environments. We validate Harpoon using traces
collected from a live router and then demonstrate its
capabilities in a series of router performance
benchmark tests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network flows; traffic generation",
}
@Article{Ribeiro:2004:STA,
author = "Vinay J. Ribeiro and Rudolf H. Riedi and Richard G.
Baraniuk",
title = "Spatio-temporal available bandwidth estimation with
{STAB}",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "394--395",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005734",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the problem of locating in space and over
time a network path's {\em tight\/} link, that is, the
link with the least available bandwidth on the path.
Tight link localization benefits network-aware
applications, provides insight into the causes of
network congestion and ways to circumvent it, and aids
network operations. We present {\em STAB}, a
light-weight probing tool to locate tight links. STAB
combines the probing concepts of self-induced
congestion, tailgating, and packet chirps in a novel
fashion. We demonstrate its capabilities through
experiments on the Internet and verify our results
using router MRTG data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "available bandwidth; bandwidth; bottleneck; chirps;
estimation; probing; tailgating; tight link",
}
@Article{Rajendran:2004:OQS,
author = "Raj Kumar Rajendran and Dan Rubenstein",
title = "Optimizing the quality of scalable video streams on
{P2P} networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "396--397",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005735",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "P2P; quality; scheduling; streaming; video",
}
@Article{Wang:2004:PAT,
author = "Helen J. Wang and John Platt and Yu Chen and Ruyun
Zhang and Yi-Min Wang",
title = "{PeerPressure} for automatic troubleshooting",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "398--399",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005736",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "automatic troubleshooting; Bayesian estimates; golden
state; PeerPressure; statistics; system management",
}
@Article{Hahner:2004:QAP,
author = "J{\"o}rg H{\"a}hner and Dominique Dudkowski and Pedro
Jos{\'e} Marr{\'o}n and Kurt Rothermel",
title = "A quantitative analysis of partitioning in mobile ad
hoc networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "400--401",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005737",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mobile ad hoc networks; network topology; partition
metrics",
}
@Article{Zhang:2004:LTL,
author = "Dalu Zhang and Weili Huang and Chen Lin",
title = "Locating the tightest link of a network path",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "402--403",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005738",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The tightest link of a network path is the link where
the end-to-end available bandwidth is limited. We
propose a new probe technique, called Dual Rate
Periodic Streams (DRPS), for finding the location of
the tightest link. A DRPS probe is a periodic stream
with two rates. Initially, it goes through the path at
a comparatively high rate. When it arrives at a particular
link, the probe shifts its rate to a lower level and
keeps that rate. If proper rates are set for the probe,
we can control whether the probe is congested or not by
adjusting the shift time. When the point of rate shift
is in front of the tightest link, the probe can go
through the path without congestion, otherwise
congestion occurs. Thus, we can find the location of
the tightest link by congestion detection at the
receiver.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "available bandwidth; dual rate periodic streams
(DRPS); network measurements; tight link",
}
@Article{Sullivan:2004:UPR,
author = "David G. Sullivan and Margo I. Seltzer and Avi
Pfeffer",
title = "Using probabilistic reasoning to automate software
tuning",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "404--405",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005739",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Manually tuning the parameters or `knobs' of a complex
software system is an extremely difficult task.
Ideally, the process of software tuning should be
automated, allowing software systems to reconfigure
themselves as needed in response to changing
conditions. We present a methodology that uses a
probabilistic, graphical model known as an influence
diagram as the foundation of an effective, automated
approach to software tuning. We have used our
methodology to simultaneously tune four knobs from the
Berkeley DB embedded database system, and our results
show that an influence diagram can effectively
generalize from training data for this domain.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "influence diagrams; probabilistic reasoning;
self-tuning systems",
}
@Article{Wang:2004:MST,
author = "Bing Wang and Jim Kurose and Prashant Shenoy and Don
Towsley",
title = "Multimedia streaming via {TCP}: an analytic
performance study",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "406--407",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005740",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multimedia streaming; performance modeling",
}
@Article{Wynter:2004:PIQ,
author = "Laura Wynter and Cathy H. Xia and Fan Zhang",
title = "Parameter inference of queueing models for {IT}
systems using end-to-end measurements",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "408--409",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005741",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "end-to-end measurements; inference; queueing models",
}
@Article{Pfaff:2004:PAB,
author = "Ben Pfaff",
title = "Performance analysis of {BSTs} in system software",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "410--411",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005742",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "AVL tree; binary search tree; BST; red-black tree;
splay tree; threaded tree",
}
@Article{Wang:2004:SDP,
author = "Mengzhi Wang and Kinman Au and Anastassia Ailamaki and
Anthony Brockwell and Christos Faloutsos and Gregory R.
Ganger",
title = "Storage device performance prediction with {CART}
models",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "412--413",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005743",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work explores the application of a machine
learning tool, CART modeling, to storage devices. We
have developed approaches to predict a device's
performance as a function of input workloads, requiring
no knowledge of the device internals. Two uses of CART
models are considered: one that predicts per-request
response times (and then derives aggregate values) and
one that predicts aggregate values directly from
workload characteristics. After training on the device
in question, both provide reasonably-accurate black box
models across a range of test traces from real
environments. An expanded version of this paper is
available as a technical report [1].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "performance prediction; storage device modeling",
}
@Article{Kamra:2004:CPT,
author = "Abhinav Kamra and Vishal Misra and Erich Nahum",
title = "Controlling the performance of 3-tiered {Web} sites:
modeling, design and implementation",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "414--415",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005744",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; control theory; e-commerce; TPC-W",
}
@Article{Roughan:2004:CRT,
author = "Matthew Roughan and Tim Griffin and Morley Mao and
Albert Greenberg and Brian Freeman",
title = "Combining routing and traffic data for detection of
{IP} forwarding anomalies",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "416--417",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005745",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "IP forwarding anomalies, triggered by equipment
failures, implementation bugs, or configuration errors,
can significantly disrupt and degrade network service.
Robust and reliable detection of such anomalies is
essential to rapid problem diagnosis, problem
mitigation, and repair. We propose a simple, robust
method that integrates routing and traffic data streams
to reliably detect forwarding anomalies. The overall
method is scalable, automated and self-training. We
find this technique effectively identifies forwarding
anomalies, while avoiding the high false alarm rate
that would otherwise result if either stream were used
unilaterally.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; network anomaly detection; routing; SNMP;
traffic",
}
@Article{Tao:2004:EPB,
author = "Shu Tao and Kuai Xu and Ying Xu and Teng Fei and Lixin
Gao and Roch Guerin and Jim Kurose and Don Towsley and
Zhi-Li Zhang",
title = "Exploring the performance benefits of end-to-end path
switching",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "418--419",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005746",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multi-homing; overlay; path switching",
}
@Article{Kaplan:2004:CFR,
author = "Scott F. Kaplan",
title = "Complete or fast reference trace collection for
simulating multiprogrammed workloads: choose one",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "420--421",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005747",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "reference trace collection; trace-driven simulation",
}
@Article{Raghunath:2004:QTO,
author = "Satish Raghunath and Shivkumar Kalyanaraman and K. K.
Ramakrishnan",
title = "Quantifying trade-offs in resource allocation for
{VPNs}",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "422--423",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005748",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Virtual Private Networks (VPNs) feature notable
characteristics in structure and traffic patterns that
allow for efficient resource allocation. A strategy
that exploits the underlying characteristics of a VPN
can result in significant capacity savings to the
service provider. There are a number of admission
control and bandwidth provisioning strategies to choose
from. We examine trade-offs in design choices in the
context of distinctive characteristics of VPNs. We
examine the value of signaling-based mechanisms,
traffic matrix information and structural
characteristics of VPNs in the way they impact resource
utilization and service quality. We arrive at important
conclusions which could have an impact on the way VPNs
are architected. We show that the structure of VPNs
profoundly influences achievable resource utilization
gains with various admission control and provisioning
schemes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "hose model; point-to-multipoint; point-to-set; virtual
private networks",
}
@Article{Ruan:2004:ONS,
author = "Yaoping Ruan and Vivek S. Pai",
title = "The origins of network server latency \& the myth of
connection scheduling",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "424--425",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005749",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate the origins of server-induced latency
to understand how to improve latency optimization
techniques. Using the Flash Web server [4], we analyze
latency behavior under various loads. Despite latency
profiles that suggest standard queuing delays, we find
that most latency actually originates from negative
interactions between the application and the locking
and blocking mechanisms in the kernel. Modifying the
server and kernel to avoid these problems yields both
qualitative and quantitative changes in the latency
profiles --- latency drops by more than an order of
magnitude, and the effective service discipline also
improves. We find our modifications also mitigate
service burstiness in the application, reducing the
event queue lengths dramatically and eliminating any
benefit from application-level connection scheduling.
We identify one remaining source of unfairness, related
to competition in the networking stack. We show that
adjusting the TCP congestion window size addresses this
problem, reducing latency by an additional factor of
three.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "connection scheduling; latency; network server",
}
@Article{Anagnostakis:2004:HDI,
author = "K. G. Anagnostakis and M. B. Greenwald",
title = "A hybrid direct-indirect estimator of network internal
delays",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "426--427",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005750",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "delay; ICMP timestamp; network tomography",
}
@Article{Carlsson:2004:MPS,
author = "Niklas Carlsson and Derek L. Eager and Mary K.
Vernon",
title = "Multicast protocols for scalable on-demand download",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "428--429",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005751",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "batching; cyclic multicast; scalable download
protocols",
}
@Article{Pai:2004:IPI,
author = "Vijay S. Pai and Scott Rixner and Hyong-youb Kim",
title = "Isolating the performance impacts of network interface
cards through microbenchmarks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "430--431",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005752",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network server performance; networking
microbenchmarks",
}
@Article{Chu:2004:ECU,
author = "Jacky Chu and Kevin Labonte and Brian Neil Levine",
title = "An evaluation of {Chord} using traces of peer-to-peer
file sharing",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "432--433",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005753",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2004:GEF,
author = "Mark S. Squillante",
title = "{Guest Editor}'s foreword",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "2--2",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035336",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Osogami:2004:RAT,
author = "Takayuki Osogami and Adam Wierman and Mor
Harchol-Balter and Alan Scheller-Wolf",
title = "A recursive analysis technique for multi-dimensionally
infinite {Markov} chains",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "3--5",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035337",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance analysis of multiserver systems with
multiple classes of jobs often has a common source of
difficulty: the state space needed to capture the
system behavior grows infinitely in multiple
dimensions. For example, consider two processors, each
serving its own M/M/1 queue, where one of the
processors (the `donor') can help the other processor
(the `beneficiary') with its jobs, during times when
the donor processor is idle [5, 16] or when some
threshold conditions are met [14, 15]. Since the
behavior of beneficiary jobs depends on the number of
donor jobs in system, performance analysis of
beneficiary jobs involves a two dimensionally infinite
(2D-infinite) state space, where one dimension
corresponds to the number of beneficiary jobs and the
other dimension corresponds to the number of donor
jobs. Another example is an M/M/2 queue with two
priority classes, where high priority jobs have
preemptive priority over low priority jobs (see for
example [1, 3, 4, 8, 10, 11, 12, 17] and references
therein). Since the behavior of low priority jobs
depends on the number of high priority jobs in system,
performance analysis of low priority jobs involves a
2D-infinite state space, where each dimension
corresponds to the number of each class of jobs in
system. As we will see, when there are m priority
classes, performance analysis of the lowest priority
classes involves an m-dimensionally infinite state
space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{daSilva:2004:EAT,
author = "Ana Paula Couto da Silva and Rosa M. M. Le{\"a}o and
Edmundo {de Souza e Silva}",
title = "An efficient approximate technique for solving fluid
models",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "6--8",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035338",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic fluid-flow models have been widely used as
an important tool for the analysis of a variety of
computer and communication models. In particular, when
the event rates of the system under investigation vary
in orders of magnitude, the use of fluid models results
in considerable computational savings when compared to
traditional models where all events are explicitly
represented. This is true for instance, in the so
called performability models [10], where events that
represent structural changes in the system (e.g.,
failure and repair events) occur at much lower rates
than those associated with some performance measure,
such as the arrival and service of jobs. As another
example, consider a queueing model of a communication
network channel. The intervals between events
associated with packet arrival and departure from a
buffer may be orders of magnitude smaller than the
intervals that represent changes in the arrival rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kogan:2004:TPI,
author = "Yaakov Kogan and Gagan Choudhury",
title = "Two problems in {Internet} reliability: new questions
for old models",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "9--11",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035339",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is motivated by two problems related to
Internet reliability, where transient rather than
traditional steady-state analysis is required. First, a
failure and repair model for a router with active and
redundant processors is considered. It is proved that
the number of failed routers during a given interval of
time is asymptotically Poisson when the total number of
routers is large, and the parameter of the Poisson
process is explicitly calculated. The second problem is
related to the reliability of a nationwide IP backbone.
A situation where operational links do not have enough
spare capacity to carry additional traffic during the
outage time is referred to as bandwidth loss. We
consider only one unidirectional backbone link and
derive asymptotic approximations for the expected
bandwidth loss in the framework of generalized Erlang
and Engset models when the total number of resource
units and request arrival rates are proportionally
large.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wierman:2004:FSS,
author = "Adam Wierman and Mor Harchol-Balter",
title = "Formalizing {SMART} scheduling",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "12--13",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035340",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is well-known that policies which bias towards
small job sizes or jobs with small remaining service
times perform well with respect to mean response time
and mean slowdown. This idea has been fundamental in
many system implementations including the case of Web
servers, where it has been shown that by giving
priority to requests for small files, a Web server can
significantly reduce mean response time and mean
slowdown [1]. The heuristic has also been applied to
other application areas; for example, scheduling in
supercomputing centers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raz:2004:HFQ,
author = "David Raz and Benjamin Avi-Itzhak and Hanoch Levy",
title = "How fair is queue prioritization?",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "14--16",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035341",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Customer classification and prioritization are
commonly used in many applications to provide
preferential service in queues. Their influence on queuing
systems has been thoroughly studied from the delay
distribution perspective. However, the fairness
aspects, which are inherent to any preferential system
and highly important to customers, have hardly been
studied and not been quantified to date. In this work
we use the Resource Allocation Queueing Fairness
Measure (RAQFM) to analyze such systems and derive
their relative fairness values. We also analyze the
effect multiple servers have on fairness, showing that
multiple servers increase the fairness of the
system.1",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feng:2004:RBC,
author = "Hanhua Feng and Vishal Misra",
title = "On the relationship between coefficient of variation
and the performance of {M/G/1-FB} queues",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "17--19",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035342",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we analyze how the coefficient of
variation of the service time distribution affects the
mean sojourn time of M/G/1-FB queues. The results show
that the coefficient of variation is a necessary but
not sufficient measure to characterize heavy-tailed
distributions in terms of the performance under the FB
policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2004:DSM,
author = "Junxia Chang and Hayriye Ayhan and Jim Dai",
title = "Dynamic scheduling of multiclass open queueing
networks in a slowly changing environment",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "20--21",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035343",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The popularity and importance of Web have increased
dramatically in the past few years as well as the
complexity of Web server systems. Workload
characterization studies reveal that there exist strong
time-of-day effects in the Web traffic. Many Web sites
have sustained and higher hit rates during certain time
periods of a day than other time periods. During the
peak hours, the Web servers may even be overloaded.
Simple stochastic processes with a fixed rate fails to
capture this time varying characteristic of the Web
systems. Therefore, we herein consider that the Web
system is operating in a changing environment. Whenever
the environment changes state, the arrival rates of
user requests change as well as the service rates and
the routing decisions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marbukh:2004:KPP,
author = "Vladimir Marbukh",
title = "A knowledge plane as a pricing mechanism for
aggregate, user-centric utility maximization",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "22--24",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035344",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes pricing user centric requirements
as a potential role for the Knowledge Plane. Assuming
elastic users capable of modifying their behavior in
response to the pricing signals, this approach may
result in optimal resource allocation without necessity
for the users to acquire detailed information on the
network state as well as advanced knowledge of the user
requirements by the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "elastic users; network; performance; pricing;
utility",
}
@Article{Lin:2004:CMM,
author = "Wuqin Lin and Zhen Liu and Cathy H. Xia and Li Zhang",
title = "Cost minimization of multi-tiered e-business
infrastructure with end-to-end delay guarantees",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "25--27",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035345",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "E-Business has become a cost effective solution for
many traditional businesses and a critical component of
many companies to such a degree that guaranteeing the
performance and availability is vital. The design and
development of e-business infrastructure should meet a
twofold challenge. On one hand, it must meet customer
expectations in terms of quality of service (QoS). On
the other hand, companies have to control IT costs to
stay competitive. It is therefore crucial to understand
the tradeoff between costs and service levels so as to
enable the determination of the most cost-effective
architecture and system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Adler:2004:TOP,
author = "Micah Adler and Rakesh Kumar and Keith Ross and Dan
Rubenstein and David Turner and David D. Yao",
title = "Two optimal peer selection problems",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "28--30",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035346",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many peer computers today participate in peer-to-peer
file sharing applications in which the computers
contribute storage and bandwidth resources. Of course,
applications can only harness the resource pool if
peers make available their surplus resources to them.
It is widely documented, however, that P2P systems
are havens for `free riders': a significant fraction of
users do not contribute any resources, and a minute
fraction of users contribute the majority of the
resources. Clearly, to improve the performance of
existing P2P file sharing systems, and to enable new
classes of P2P applications, a compelling incentive
system needs to be put in place.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:2004:CDS,
author = "E. G. Coffman and Andreas Constantinides and Dan
Rubenstein and Bruce Shepherd and Angelos Stavrou",
title = "Content distribution for seamless transmission",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "31--32",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035347",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a new paradigm in information
transmission, the concept of SEAMLESS TRANSMISSION,
whereby any client in a network requesting a file
starts receiving it immediately, and experiences no
delays throughout the remainder of the downloading
time. This notion is based on the partial caching
concept [2] which was introduced to overcome some of
the disadvantages of traditional cache replacement
algorithms such as LRU and LRU-threshold [1]. The main
idea of partial caching is to store an initial part of
the file in the cache and to obtain the rest of the
file from the origin server. To achieve the maximal
retrieval performance of seamless transmission, clients
must be prepared to re-sequence segments of the files
received out of order. With this caveat, seamless
transmission can be viewed as a way to implement strict
quality of service (QoS) guarantees to all clients of a
network. This paper gives a provably correct technique
for achieving seamlessness for a given file located at
the root node in a tree structured network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2004:AOT,
author = "David Gamarnik and Petar Mom{\v{c}}ilovi{\'c}",
title = "An asymptotic optimality of the transposition rule for
linear lists",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "33--34",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035348",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The linear list is one of basic data structures in
computer science with search being a primary operation
defined on it. Items are located in the list by
sequentially examining them from the beginning of the
list. Intuitively one would like to place items that
are frequently requested at the front of the list in
order to minimize the number of items being examined.
Given the properties of the request sequence one could
place items in an order that minimizes the search cost.
Yet often properties of the request sequence are either
not known in advance or time dependent. Hence, it is
desirable to employ self-organizing algorithms. The two
best known such rules are the move-to-front and
transposition rule [9, Section 6]. In addition to being
simple these rules are memory-free, i.e., require no
memory for their operation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "average-case analysis; exclusion process;
self-organizing list",
}
@Article{Baryshnikov:2004:SAT,
author = "Yuliy Baryshnikov and Ed Coffman and Petar
Mom{\v{c}}ilovi{\'c}",
title = "Self assembly times in {DNA}-based computation",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "35--37",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035349",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Speed of computation and power consumption are the two
main parameters of conventional computing devices
implemented in microelectronic circuits. As performance
of such devices approaches physical limits, new
computing paradigms are emerging. Two paradigms
receiving great attention are quantum and DNA-based
molecular computing.\par
This paper focuses on DNA-based computing. This concept
can be abstracted to growth models where computational
elements called tiles are self-assembled one by one,
subject to some simple hierarchical rules, to fill a
given template encoding a Boolean formula. While
DNA-based computational devices are known to be
extremely energy efficient, little is known concerning
the fundamental question of computation times. In
particular, given a function, we study the time
required to determine its value for a given input. In
the simplest instance, the analysis has interesting
connections with interacting particle systems and
variational problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Saniee:2004:PDS,
author = "Iraj Saniee and Indra Widjaja and John Morrison",
title = "Performance of a distributed scheduling protocol for
{TWIN}",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "38--40",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035350",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses a scheduling mechanism for a new
network architecture (TWIN) that provides arbitrary
capacity up to a wavelength to any source-destination
pair as needed, without optical-to-electronic
conversion. The network emulates ultra-fast switching
in the passive network core through the use of
ultra-fast wavelength tunable lasers at the network
edge. This architecture is suitable for any end-to-end
traffic load, from static or quasi-static load (Sonet),
to highly dynamic (IP) load. The key enabler of this
architecture is a scheduling mechanism that schedules
transmissions for maximal throughput. We propose a
distributed scheduling scheme that is randomized for
highly dynamic load and can learn to adjust for
quasi-static load. We derive analytical formulae for
the performance of the proposed scheme when load is
highly dynamic, show that it outperforms standard
protocols (such as aloha) and illustrate the effect of
learning for quasi-static load through simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bekker:2004:ITF,
author = "Ren{\'e} Bekker and Sem Borst and Rudesindo
N{\'e}{\~n}ez-Queija",
title = "Integration of {TCP}-friendly streaming sessions and
heavy-tailed elastic flows",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "41--43",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035351",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a fixed number of streaming sessions
sharing a bottleneck link with a dynamic population of
elastic flows. We assume that the sizes of the elastic
flows exhibit heavy-tailed characteristics. The elastic
flows are TCP-controlled, while the transmission rates
of the streaming applications are governed by a
so-called TCP-friendly rate control
protocol.\par
Adopting the Processor-Sharing (PS) discipline to model
the bandwidth sharing, we investigate the tail
distribution of the deficit in service received by the
streaming sessions compared to a nominal service
target. The latter metric provides an indication for
the quality experienced by the streaming applications.
The results yield valuable qualitative insight into the
occurrence of persistent quality disruption for the
streaming users. We also examine the delay performance
of the elastic flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vanKessel:2004:ARA,
author = "Gijs van Kessel and Rudesindo N{\'u}{\~n}ez-Queija and
Sem Borst",
title = "Asymptotic regimes and approximations for
discriminatory processor sharing",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "44--46",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035352",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the joint queue length distribution of the
Discriminatory Processor Sharing model, assuming all
classes have phase-type service requirement
distributions. We show that the moments of the joint
queue length distribution can be obtained by solving
linear equations. We use this to study the system in
two asymptotic regimes. In the first regime, the
different user classes operate on strictly separated
time scales. In the second regime, we study the system
in heavy traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cui:2004:ODM,
author = "Yi Cui and Yuan Xue and Klara Nahrstedt",
title = "Optimal distributed multicast routing using network
coding: theory and applications",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "47--49",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035353",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Optimal data routing in a network can be often
understood as a multicommodity flow problem. Given a
network and a set of commodities, i.e., a set of
source-destination pairs, one tries to achieve certain
optimization goal, such as minimum delay, maximum
throughput, while maintaining certain fairness among
all commodities. The constraints of such optimization
problems are usually network link capacity and traffic
demand of each commodity. Multicommodity flow problem
has been well studied as a typical linear programming
problem. Its distributed solutions have also been
proposed[2].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2004:CPS,
author = "Xuan Li and David D. Yao",
title = "Control and pricing in stochastic networks with
concurrent resource occupancy",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "50--52",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035354",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Concurrent resource occupancy pervades most
engineering and service systems. For example, a
multi-leg plane trip requires seat reservation on
several connecting flights; a configure-to-order
product demands the simultaneous processing of all its
components; a file transfer on the Internet needs
bandwidth on all the links along its route from source
to destination. The object of our study is a network
with stochastic concurrent occupancy of resources. The
network can be physical (e.g., a telecommunication
network), or virtual (e.g., the Worldwide Web), or
relational (e.g., the bill of materials of a product,
representing its configuration of all components); and
both the demand/order arrivals and their processing
times required of the resources are stochastic. Our
goal is to do revenue optimization in the network
through two decisions: (a) pricing: to determine the
price charged to each job class and its dynamic
adjustment over time; and (b) resource control: to
regulate the distribution of resources among the job
classes, in particular, when to accept/reject a job and
from which class.\par
Below, we highlight a new fixed-point approximation for
a network operating under a set of thresholds that
control the access of jobs from each class. With this
fixed-point approximation, the resource control problem
takes the form of setting the optimal thresholds, which
can be formulated and solved as a linear program. To
determine the optimal prices then amounts to solving
another set of optimality equations on top of the
linear program. Furthermore, we can show that our
approach via solving optimization problems based on the
fixed-point approximation is optimal in some asymptotic
sense.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Guo:2004:OPR,
author = "Xin Guo and Yingdong Lu and Mark S. Squillante",
title = "Optimal probabilistic routing in distributed parallel
queues",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "53--54",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035355",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the fundamental problem of routing
customers among distributed parallel queues to minimize
an objective function based on equilibrium sojourn
times under general assumptions for the arrival and
service processes and under the assumption that
customers are routed to the parallel queues in a
probabilistic manner. More specifically, we derive
explicit solutions for the asymptotically optimal
vector of probabilities that control the routing of
customers upon arrival among a set of heterogeneous
general single-server queues through stochastic-process
limits. Our assumption of probabilistic routing is
consistent with previous theoretical studies of this
optimization problem, and our solutions can be used for
the parameter settings of other routing mechanisms
found in practice. Stochastic-process limits are
exploited in order to be able to handle general arrival
and service processes and obtain explicit solutions to
the scheduling optimization problems of interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Neto:2004:CBU,
author = "Humberto T. Marques Neto and Jussara M. Almeida and
Leonardo C. D. Rocha and Wagner Meira and Pedro H. C.
Guerra and Virgilio A. F. Almeida",
title = "A characterization of broadband user behavior and
their e-business activities",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "3--13",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052308",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a characterization of broadband
user behavior from an Internet Service Provider
standpoint. Users are broken into two major categories:
residential and Small-Office/Home-Office (SOHO). For
each user category, the characterization is performed
along four criteria: (i) session arrival process, (ii)
session duration, (iii) number of bytes transferred
within a session and (iv) user request patterns. Our
results show that both residential and SOHO session
inter-arrival times are exponentially distributed.
Whereas residential session arrival rates remain
relatively high throughout the day, SOHO session
arrival rates vary much more significantly over the
course of the day. On
the other hand, a typical SOHO user session is longer
and transfers a larger volume of data. Furthermore, our
analysis uncovers two main groups of session request
patterns within each user category. The first group
consists of user sessions that use traditional Internet
services, such as e-mail, instant messenger and,
mostly, www services. On the other hand, sessions from
the second group, a smaller group, use typically
peer-to-peer file sharing applications, remain active
for longer periods and transfer a large amount of data.
Looking further into the e-business services most
commonly accessed, we found that subscription-based and
advertising services account for the vast majority of
user HTTP requests in both residential and SOHO
workloads. Understanding these user behavior patterns
is important to the development of more efficient
applications for broadband users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Andreolini:2004:FGP,
author = "Mauro Andreolini and Michele Colajanni and Riccardo
Lancellotti and Francesca Mazzoni",
title = "Fine grain performance evaluation of e-commerce
sites",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "14--23",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052309",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "E-commerce sites are still a reference for the Web
technology in terms of complexity and performance
requirements, including availability and scalability.
In this paper we show that a coarse-grain analysis,
which is used in most performance studies, may lead to
incomplete or false deductions about the behavior of
the hardware and software components supporting
e-commerce sites. Through a fine-grain performance
evaluation of a medium-size e-commerce site, we find
some interesting results that demonstrate the
importance of an analysis approach that is carried out
at the software function level in combination with
distribution-oriented metrics instead of average
values.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sopitkamol:2004:RCP,
author = "Monchai Sopitkamol",
title = "Ranking configuration parameters in multi-tiered
e-commerce sites",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "24--33",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052310",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "E-commerce systems are composed of many components
with several configurable parameters that, if properly
configured, can optimize system performance. Before
upgrading existing systems to overcome performance
bottlenecks, several areas of a site's architecture and
its parameters may be adjusted to improve performance.
This paper provides a method to rank key configurable
e-commerce system parameters that significantly impact
overall system performance, and the performance of the
most significant Web function types. We consider both
on-line and off-line parameters at each of the
e-commerce system layers: Web server, application
server, and database server. In order to accomplish our
task, we designed a practical, ad-hoc approach that
involves conducting experiments on a testbed system set
up as a small e-commerce site. The configurable
parameters are ranked based on their degrees of
performance improvement to the system and to the most
critical Web functions. The performance metrics of
interest include the server's response time, system
throughput, and the probability of rejecting a
customer's request. The experiments were conducted on
an e-commerce site compliant with the TPC-W benchmark.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DAntonio:2004:ASC,
author = "S. D'Antonio and M. Esposito and S. P. Romano and G.
Ventre",
title = "Assessing the scalability of component-based
frameworks: the {CADENUS} case study",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "34--43",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052311",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes an approach to scalability
analysis of component-based systems. A theoretical
model of the orchestrated behavior of a system's
components is developed and potential bottlenecks are
identified. The model is derived by performing an
analysis of the average number of messages that each
involved entity has to deal with, i.e. receive,
elaborate and possibly forward. By appropriately
setting the various model parameters, it is possible to
evaluate a system's behavior in a number of different
scenarios. The model itself is based upon a queuing
network paradigm, whereby each component is associated
with a `service centre' characterized by specific
values of both the message arrival rate and the service
time: based on such values, the utilization coefficient
of the service centres is computed and the potential
bottlenecks are identified. The queuing network model
is also exploited to evaluate the performance of the
overall system under various configurations. The
proposed approach is introduced and developed by taking
the CADENUS system as a running example. CADENUS is a
component-based framework designed and developed within
a recent IST project, whose main goal resides in the
provisioning of Premium IP services by means of an
effective application of the so-called {\em mediation
paradigm.\/}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mediation; network protocols; probability theory;
queuing networks; scalability",
}
@Article{Ye:2004:RRS,
author = "Tao Ye and Shivkumar Kalyanaraman",
title = "A recursive random search algorithm for network
parameter optimization",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "44--53",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052306",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes a new heuristic search algorithm,
Recursive Random Search(RRS), for black-box
optimization problems. Specifically, this algorithm is
designed for the dynamical parameter optimization of
network protocols which emphasizes on obtaining good
solutions within a limited time frame rather than full
optimization. The RRS algorithm is based on the initial
high-efficiency property of random sampling and
attempts to maintain this high-efficiency by constantly
`restarting' random sampling with adjusted sample
spaces. Due to its basis on random sampling, the RRS
algorithm is robust to the effect of random noises in
the objective function and it performs especially
efficiently when handling the objective functions with
negligible parameters. These properties have been
demonstrated with the tests on a suite of benchmark
functions. The RRS algorithm has been successfully
applied to the optimal configuration of several network
protocols. One application to a network routing
algorithm is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haverkort:2005:PV,
author = "Boudewijn R. Haverkort and Joost-Pieter Katoen",
title = "Performance and verification",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "3--3",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059817",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Some twenty five years ago, the field of
computer-communication system performance evaluation
and the field of formal specification and verification
were regarded as completely disjunct. The former field
focussed on the quantitative aspects of system
behaviour, expressed in measures such as delays,
throughputs and loss probabilities, whereas the latter
field focussed on the qualitative aspects of system
behaviour, expressed in measures (or, properties) such
as system liveness, deadlock freeness and safety. Over
the years, however, this distinction has shown to be
not always useful. In fact, we see a large variety of
systems for which the qualitative behaviour cannot be
decoupled from the quantitative aspect. Think for
instance of communication protocols in an embedded
system setting: the qualitative correctness of a
protocol, without considering (absolute) timing
aspects, is not enough for classifying a protocol as
correct. Indeed, only when the protocol behaves as it
should, and does so {\em in a timely manner,\/} the
protocol can be regarded as correct. Observations of
this kind have lead to a variety of integrated
approaches toward performance evaluation and
verification.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciardo:2005:IDS,
author = "Gianfranco Ciardo and Andrew S. Miner",
title = "Implicit data structures for logic and stochastic
systems analysis",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "4--9",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059818",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Both logic and stochastic analysis have strong
theoretical underpinnings, but they have been
traditionally relegated to separate areas of computer
science, the former focusing on logic and discrete
algorithms, the latter on exact or approximate
numerical methods. In the last few years, though, there
has been a convergence of research in these two areas,
due to the realization that data structures used in one
area can benefit the other and that, by merging the
goals of the two areas, a more integrated approach to
system analysis can be derived. In this paper, we
describe some of the beneficial interactions between
the two, and some of the research challenges ahead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baier:2005:MCM,
author = "Christel Baier and Boudewijn R. Haverkort and Holger
Hermanns and Joost-Pieter Katoen",
title = "Model checking meets performance evaluation",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "10--15",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059819",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markov chains are one of the most popular models for
the evaluation of performance and dependability of
information processing systems. To obtain performance
measures, typically long-run or transient state
probabilities of Markov chains are determined.
Sometimes the Markov chain at hand is equipped with
rewards and computations involve determining long-run
or instantaneous reward probabilities. This note
summarises a technique to determine performance and
dependability {\em guarantees\/} of Markov chains.
Given a precise description of the desired guarantee,
all states in the Markov chain are determined that
surely meet the guarantee. This is done in a fully
automated way. Guarantees are described using logics.
The use of logics yields an expressive framework that
allows one to express well-known measures, but also
(new) intricate and complex performance guarantees. The
power of this technique is that no matter how complex
the logical guarantee, it is {\em automatically\/}
checked which states in the Markov chain satisfy it.
Neither manual manipulation of Markov chains (or their
high-level descriptions) nor knowledge of any numerical
technique to analyze them efficiently is needed. This
applies to any (time-homogeneous) Markov chain of any
structure specified in any high-level formalism.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kwiatkowska:2005:PMC,
author = "Marta Kwiatkowska and Gethin Norman and David Parker",
title = "Probabilistic model checking in practice: case studies
with {PRISM}",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "16--21",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059820",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe some practical applications
of {\em probabilistic model checking,\/} a technique
for the formal analysis of systems which exhibit
stochastic behaviour. We give an overview of a
selection of case studies carried out using the
probabilistic model checking tool PRISM, demonstrating
the wide range of application domains to which these
methods are applicable. We also illustrate several
benefits of using formal verification techniques to
analyse probabilistic systems, including: (i) that they
allow a wide range of numerical properties to be
computed accurately; and (ii) that they perform a
complete and exhaustive analysis enabling, for example,
a study of best- and worst-case scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baier:2005:PVM,
author = "Christel Baier and Frank Ciesinski and Marcus
Gr{\"o}{\ss}er",
title = "{ProbMela} and verification of {Markov} decision
processes",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "22--27",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059821",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markov decision processes (MDP) can serve as
operational model for probabilistic distributed systems
and yield the basis for model checking algorithms
against qualitative or quantitative properties. In this
paper, we summarize the main steps of a quantitative
analysis for a given MDP and formula of linear temporal
logic, give an introduction to the modelling language
ProbMela which provides a simple and intuitive way to
describe complex systems with a MDP-semantics and
present the basic features of the MDP model checker
LiQuor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jansen:2005:QMA,
author = "David N. Jansen and Holger Hermanns",
title = "{QoS} modelling and analysis with {UML} statecharts:
the {StoCharts} approach",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "28--33",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059822",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The UML is an influential and widespread notation for
high-level modelling of information processing systems.
UML statechart diagrams are a graphical language to
describe system behaviour. They constitute one of the
most intensively used formalisms in the UML. However,
statechart diagrams lack concepts for describing
real-time, performance, dependability and quality of
service (QoS) characteristics at a behavioural level.
This note describes a QoS-oriented
extension of UML statechart diagrams, called StoCharts.
StoCharts enhance the basic statechart formalism with
two distinguished features, both simple and easy to
understand, yet powerful enough to model a sufficiently
rich class of stochastic processes. This is illustrated
by a selection of case studies performed using
StoCharts. We review the main ingredients of StoCharts
and survey tool support and case studies performed with
the language, and place StoCharts in the context of
other extensions of statechart diagrams.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Behrmann:2005:OSU,
author = "Gerd Behrmann and Kim G. Larsen and Jacob I.
Rasmussen",
title = "Optimal scheduling using priced timed automata",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "34--40",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059823",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This contribution reports on the considerable effort
made recently towards extending and applying
well-established timed automata technology to optimal
scheduling and planning problems. The effort of the
authors in this direction has to a large extent been
carried out as part of the European projects VHS [20]
and AMETIST [16] and are available in the recently
released UPPAAL CORA [12], a variant of the real-time
verification tool UPPAAL [18, 5] specialized for
cost-optimal reachability for the extended model of
so-called priced timed automata.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McIver:2005:ARP,
author = "Annabelle McIver and Carroll Morgan",
title = "Abstraction and refinement in probabilistic systems",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "41--47",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We summarise a verification method for probabilistic
systems that is based on abstraction and refinement,
and extends traditional assertional styles of
verification. The approach makes extensive use of the
{\em expectation transformers of pGCL\/} [17, 16, 13],
a compact probabilistic programming language with an
associated logic of real-valued functions. Analysis of
large systems is made tractable by abstraction which,
together with algebraic and logical reasoning, results
in strong and general guarantees about
probabilistic-system properties. Although our examples
are specific (to {\em pGCL\/}), our overall goal in
this note is to advocate the hierarchical development
of probabilistic programs via levels of abstraction,
connected by refinement, and to illustrate the proof
obligations incurred by such an approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hoelzle:2005:GHL,
author = "Urs Hoelzle",
title = "{Google}: or how {I} learned to love terabytes",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "1--1",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064213",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Search is one of the most important applications used
on the internet, but it also poses some of the most
interesting challenges in computer science. Providing
high-quality search requires understanding across a
wide range of computer science disciplines, from
lower-level systems issues like computer architecture
and distributed systems to applied areas like
information retrieval, machine learning, data mining,
and user interface design. In this talk I'll share some
interesting observations and measurements obtained at
Google, and will illustrate the behind-the-scenes
pieces of infrastructure (both hardware and software)
that we've built in order to extract this information
from many terabytes of data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Massoulie:2005:CRS,
author = "Laurent Massouli{\'e} and Milan Vojnovi{\'C}",
title = "Coupon replication systems",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "2--13",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064215",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by the study of peer-to-peer file swarming
systems {\`a} la BitTorrent, we introduce a
probabilistic model of {\em coupon replication
systems}. These systems consist of users, aiming to
complete a collection of distinct coupons. Users are
characterised by their current collection of coupons,
and leave the system once they complete their coupon
collection. The system evolution is then specified by
describing how users of distinct types meet, and which
coupons get replicated upon such encounters. For open
systems, with exogenous user arrivals, we derive
necessary and sufficient stability conditions in a
layered scenario, where encounters are between users
holding the same number of coupons. We also consider a
system where encounters are between users chosen
uniformly at random from the whole population. We show
that performance, captured by sojourn time, is
asymptotically optimal in both systems as the number of
coupon types becomes large. We also consider closed
systems with no exogenous user arrivals. In a special
scenario where users have only one missing coupon, we
evaluate the size of the population ultimately
remaining in the system, as the initial number of
users, $N$, goes to infinity. We show that this
decreases geometrically with the number of coupons,
$K$. In particular, when the ratio $ K / \log (N) $ is
above a critical threshold, we prove that this number
of left-overs is of order $ \log (\log (N))$. These
results suggest that performance of file swarming
systems does not depend critically on either altruistic
user behavior, or on load balancing strategies such as
{\em rarest first}.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "content distribution; file swarming; peer-to-peer",
}
@Article{Tang:2005:LTO,
author = "Chunqiang Tang and Melissa J. Buco and Rong N. Chang
and Sandhya Dwarkadas and Laura Z. Luan and Edward So
and Christopher Ward",
title = "Low traffic overlay networks with large routing
tables",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "14--25",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064216",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The routing tables of Distributed Hash Tables (DHTs)
can vary from size $ O(1) $ to $ O(n) $. Currently,
what is lacking is an analytic framework to suggest the
optimal routing table size for a given workload. This
paper (1) compares DHTs with $ O(1) $ to $ O(n) $
routing tables and identifies some good design points;
and (2) proposes protocols to realize the potential of
those good design points. We use total traffic as the
uniform metric to compare heterogeneous DHTs and
emphasize the balance between maintenance cost and
lookup cost. Assuming a node on average processes 1,000
or more lookups during its entire lifetime, our
analysis shows that large routing tables actually lead
to both low traffic and low lookup hops. These good
design points translate into one-hop routing for
systems of medium size and two-hop routing for large
systems. Existing one-hop or two-hop protocols are
based on a hierarchy. We instead demonstrate that it is
possible to achieve completely decentralized one-hop or
two-hop routing, i.e., without giving up being
peer-to-peer. We propose 1h-Calot for one-hop routing
and 2h-Calot for two-hop routing. Assuming a moderate
lookup rate, compared with DHTs that use $ O(\log n) $
routing tables, 1h-Calot and 2h-Calot save traffic by
up to 70\% while resolving lookups in one or two hops
as opposed to $ O(\log n) $ hops.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed hash table; overlay network; peer-to-peer
system",
}
@Article{Leonard:2005:LBN,
author = "Derek Leonard and Vivek Rai and Dmitri Loguinov",
title = "On lifetime-based node failure and stochastic
resilience of decentralized peer-to-peer networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "26--37",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064217",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To understand how high rates of churn and random
departure decisions of end-users affect connectivity of
P2P networks, this paper investigates resilience of
random graphs to lifetime-based node failure and
derives the expected delay before a user is forcefully
isolated from the graph and the probability that this
occurs within his/her lifetime. Our results indicate
that systems with heavy-tailed lifetime distributions
are more resilient than those with light-tailed (e.g.,
exponential) distributions and that for a given average
degree, $k$-regular graphs exhibit the highest
resilience. As a practical illustration of our results,
each user in a system with $n$ = 100 billion peers,
30-minute average lifetime, and 1-minute
node-replacement delay can stay connected to the graph
with probability $ 1 - 1 / n$ using only 9 neighbors.
This is in contrast to 37 neighbors required under
previous modeling efforts. We finish the paper by
showing that many P2P networks are {\em almost
surely\/} (i.e., with probability $ 1 - o(1)$)
connected if they have no isolated nodes and derive a
simple model for the probability that a P2P system
partitions under churn.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Pareto; peer-to-peer; stochastic lifetime resilience",
}
@Article{Dumitriu:2005:DSR,
author = "D. Dumitriu and E. Knightly and A. Kuzmanovic and I.
Stoica and W. Zwaenepoel",
title = "Denial-of-service resilience in peer-to-peer file
sharing systems",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "38--49",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064218",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Peer-to-peer (p2p) file sharing systems are
characterized by highly replicated content distributed
among nodes with enormous aggregate resources for
storage and communication. These properties alone are
not sufficient, however, to render p2p networks immune
to denial-of-service (DoS) attack. In this paper, we
study, by means of analytical modeling and simulation,
the resilience of p2p file sharing systems against DoS
attacks, in which malicious nodes respond to queries
with erroneous responses. We consider the file-targeted
attacks in current use in the Internet, and we
introduce a new class of p2p-network-targeted attacks.
In file-targeted attacks, the attacker puts a large
number of corrupted versions of a {\em single\/} file
on the network. We demonstrate that the effectiveness
of these attacks is highly dependent on the clients'
behavior. For the attacks to succeed over the long
term, clients must be unwilling to share files, slow in
removing corrupted files from their machines, and quick
to give up downloading when the system is under attack.
In network-targeted attacks, attackers respond to
queries for {\em any\/} file with erroneous
information. Our results indicate that these attacks
are highly scalable: increasing the number of malicious
nodes yields a hyperexponential decrease in system
goodput, and a moderate number of attackers suffices to
cause a near-collapse of the entire system. The key
factors inducing this vulnerability are (i)
hierarchical topologies with misbehaving `supernodes,'
(ii) high path-length networks in which attackers have
increased opportunity to falsify control information,
and (iii) power-law networks in which attackers insert
themselves into high-degree points in the graph.
Finally, we consider the effects of client
counter-strategies such as randomized reply selection,
redundant and parallel download, and reputation
systems. Some counter-strategies (e.g., randomized
reply selection) provide considerable immunity to
attack (reducing the scaling from hyperexponential to
linear), yet significantly hurt performance in the
absence of an attack. Other counter-strategies yield
little benefit (or penalty). In particular, reputation
systems show little impact unless they operate with
near perfection.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "denial of service; file pollution; network-targeted
attacks; peer-to-peer",
}
@Article{Moore:2005:ITC,
author = "Andrew W. Moore and Denis Zuev",
title = "{Internet} traffic classification using {Bayesian}
analysis techniques",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "50--60",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064220",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Accurate traffic classification is of fundamental
importance to numerous other network activities, from
security monitoring to accounting, and from Quality of
Service to providing operators with useful forecasts
for long-term provisioning. We apply a Na{\"\i}ve Bayes
estimator to categorize traffic by application.
Uniquely, our work capitalizes on hand-classified
network data, using it as input to a supervised
Na{\"\i}ve Bayes estimator. In this paper we illustrate
the high level of accuracy achievable with the
Na{\"\i}ve Bayes estimator. We further illustrate the
improved accuracy of refined variants of this
estimator. Our results indicate that with the simplest
of Na{\"\i}ve Bayes estimators we are able to achieve
about 65\% accuracy on per-flow classification and with
two powerful refinements we can improve this value to
better than 95\%; this is a vast improvement over
traditional techniques that achieve 50--70\%. While our
technique uses training data, with categories derived
from packet-content, all of our training and testing
was done using header-derived discriminators. We
emphasize this as a powerful aspect of our approach:
using samples of well-known traffic to allow the
categorization of traffic using commonly available
information alone.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "flow classification; Internet traffic; traffic
identification",
}
@Article{Kumar:2005:DSA,
author = "Abhishek Kumar and Minho Sung and Jun (Jim) Xu and
Ellen W. Zegura",
title = "A data streaming algorithm for estimating
subpopulation flow size distribution",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "61--72",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064221",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Statistical information about the flow sizes in the
traffic passing through a network link helps a network
operator to characterize network resource usage, infer
traffic demands, detect traffic anomalies, and improve
network performance through traffic engineering.
Previous work on estimating the flow size distribution
for the {\em complete population\/} of flows has
produced techniques that either make inferences from
sampled network traffic, or use data streaming
approaches. In this work, we identify and solve a more
challenging problem of estimating the size distribution
and other statistical information about {\em arbitrary
subpopulations\/} of flows. Inferring subpopulation
flow statistics is more challenging than the complete
population counterpart, since subpopulations of
interest are often specified {\em a posteriori\/}
(i.e., after the data collection is done), making it
impossible for the data collection module to `plan in
advance'. Our solution consists of a novel mechanism
that combines data streaming with traditional packet
sampling to provide highly accurate estimates of
subpopulation flow statistics. The algorithm employs
two data collection modules operating in parallel --- a
NetFlow-like packet sampler and a streaming data
structure made up of an array of counters. Combining
the data collected by these two modules, our estimation
algorithm uses a statistical estimation procedure that
correlates and decodes the outputs (observations) from
both data collection modules to obtain flow statistics
for any arbitrary subpopulation. Evaluations of this
algorithm on real-world Internet traffic traces
demonstrate its high measurement accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; EM algorithm; flow statistics;
statistical inference; traffic analysis",
}
@Article{Cohen:2005:PCL,
author = "Edith Cohen and Carsten Lund",
title = "Packet classification in large {ISPs}: design and
evaluation of decision tree classifiers",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "73--84",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064222",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Packet classification, although extensively studied,
is an evolving problem. Growing and changing needs
necessitate the use of larger filters with more complex
rules. The increased complexity and size pose
implementation challenges on current hardware solutions
and drive the development of software classifiers, in
particular, decision-tree based classifiers. Important
performance measures for these classifiers are time and
memory due to required high throughput and use of
limited fast memory. We analyze Tier 1 ISP data that
includes filters and corresponding traffic from over a
hundred edge routers and thousands of interfaces. We
provide a comprehensive view on packet classification
in an operational network and glean insights that help
us design more effective classification algorithms. We
propose and evaluate decision tree classifiers with
{\em common branches}. These classifiers have linear
worst-case memory bounds and require much less memory
than standard decision tree classifiers, but
nonetheless, we show that on our data they have similar
average and worst-case time performance. We argue that
common branches exploit structure that is present in
real-life data sets. We observe a strong Zipf-like
pattern in the usage of rules in a classifier, where a
very small number of rules resolves the bulk of traffic
and most rules are essentially never used. Inspired by
this observation, we propose {\em traffic-aware\/}
classifiers that obtain superior average-case and
bounded worst-case performance. Good average-case behavior can
boost performance of software classifiers that can be
used in small to medium sized routers and are also
important for traffic analysis and traffic
engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "access control lists; decision trees; packet
filtering; routing",
}
@Article{Keys:2005:RSA,
author = "Ken Keys and David Moore and Cristian Estan",
title = "A robust system for accurate real-time summaries of
{Internet} traffic",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "85--96",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064223",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Good performance under extreme workloads and isolation
between the resource consumption of concurrent jobs are
perennial design goals of computer systems ranging from
multitasking servers to network routers. In this paper
we present a specialized system that computes multiple
summaries of IP traffic in real time and achieves
robustness and isolation between tasks in a novel way:
by automatically adapting the parameters of the
summarization algorithms. In traditional systems,
anomalous network behavior such as denial of service
attacks or worms can overwhelm the memory or CPU,
making the system produce meaningless results exactly
when measurement is needed most. In contrast, our
measurement system reacts by gracefully degrading the
accuracy of the affected summaries. The types of
summaries we compute are widely used by network
administrators monitoring the workloads of their
networks: the ports sending the most traffic, the IP
addresses sending or receiving the most traffic or
opening the most connections, etc. We evaluate and
compare many existing algorithmic solutions for
computing these summaries, as well as two new solutions
we propose here: `flow sample and hold' and `Bloom
filter tuple set counting'. Compared to previous
solutions, these new solutions offer better memory
versus accuracy tradeoffs and have more predictable
resource consumption. Finally, we evaluate the actual
implementation of a complete system that combines the
best of these algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adaptive response; measurement; passive monitoring;
sampling; traffic estimation",
}
@Article{Choi:2005:PCW,
author = "Sunwoong Choi and Kihong Park and Chong-kwon Kim",
title = "On the performance characteristics of {WLANs}:
revisited",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "97--108",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064225",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wide-spread deployment of infrastructure WLANs has
made Wi-Fi an integral part of today's Internet access
technology. Despite its crucial role in affecting
end-to-end performance, past research has focused on
MAC protocol enhancement, analysis and simulation-based
performance evaluation without sufficient consideration
for modeling inaccuracies stemming from inter-layer
dependencies, including physical layer diversity, that
significantly impact performance. We take a fresh look
at IEEE 802.11 WLANs, and using a combination of
experiment, simulation, and analysis demonstrate its
surprisingly agile performance traits. Our main
findings are two-fold. First, contention-based MAC
throughput degrades gracefully under congested
conditions, enabled by physical layer channel diversity
that reduces the effective level of MAC contention. In
contrast, fairness and jitter significantly degrade at
a critical offered load. This duality obviates the need
for link layer flow control for throughput improvement
but necessitates traffic control for fairness and QoS.
Second, TCP-over-WLAN achieves high throughput
commensurate with that of wireline TCP under saturated
conditions, challenging the widely held perception that
TCP throughput fares poorly over WLANs when subject to
heavy contention. We show that TCP-over-WLAN prowess is
facilitated by the self-regulating actions of DCF and
TCP congestion control that jointly drive the shared
physical channel at an effective load of 2--3 wireless
stations, even when the number of active stations is
very large. Our results highlight subtle inter-layer
dependencies including the mitigating influence of
TCP-over-WLAN on dynamic rate shifting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "DCF performance; inter-layer dependence; physical
layer diversity; rate control; TCP-over-WLAN
performance",
}
@Article{Ramaiyan:2005:FPA,
author = "Venkatesh Ramaiyan and Anurag Kumar and Eitan Altman",
title = "Fixed point analysis of single cell {IEEE 802.11e}
{WLANs}: uniqueness, multistability and throughput
differentiation",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "109--120",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064226",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the vector fixed point equations arising
out of the analysis of the saturation throughput of a
single cell IEEE 802.11e wireless local area network
with nodes that have different back-off parameters,
including different Arbitration InterFrame Space (AIFS)
values. We consider balanced and unbalanced solutions
of the fixed point equations arising in homogeneous and
nonhomogeneous networks. We are concerned, in
particular, with (i) whether the fixed point is
balanced within a class, and (ii) whether the fixed
point is unique. Our simulations show that when
multiple unbalanced fixed points exist in a homogeneous
system then the time behaviour of the system
demonstrates severe short term unfairness (or {\em
multistability\/}). Implications for the use of the
fixed point formulation for performance analysis are
also discussed. We provide a condition for the fixed
point solution to be balanced within a class, and also
a condition for uniqueness. We then provide an
extension of our general fixed point analysis to
capture AIFS based differentiation; again a condition
for uniqueness is established. An asymptotic analysis
of the fixed point is provided for the case in which
packets are never abandoned, and the number of nodes
goes to $ \infty $. Finally the fixed point equations
are used to obtain insights into the throughput
differentiation provided by different initial
back-offs, persistence factors, and AIFS, for finite
number of nodes, and for differentiation parameter
values similar to those in the standard.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "EDCF analysis; performance of wireless LANs; QoS in
wireless LANs; short term unfairness",
}
@Article{Lindemann:2005:MEI,
author = "Christoph Lindemann and Oliver P. Waldhorst",
title = "Modeling epidemic information dissemination on mobile
devices with finite buffers",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "121--132",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064227",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Epidemic algorithms have recently been proposed as an
effective solution for disseminating information in
large-scale peer-to-peer (P2P) systems and in mobile ad
hoc networks (MANET). In this paper, we present a
modeling approach for steady-state analysis of epidemic
dissemination of information in a MANET. As a major
contribution, the introduced approach explicitly
represents the spread of multiple data items, finite
buffer capacity at mobile devices and a least recently
used buffer replacement scheme. Using the introduced
modeling approach, we analyze seven degrees of
separation (7DS) as one well-known approach for
implementing P2P data sharing in a MANET using epidemic
dissemination of information. A validation of results
derived from the analytical model against simulation
shows excellent agreement. Quantitative performance
curves derived from the analytical model yield several
insights for optimizing the system design of 7DS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical performance modeling; mobile ad hoc
networks; peer-to-peer data sharing;
performance-oriented design and evaluation studies of
distributed systems",
}
@Article{Kumar:2005:AAC,
author = "V. S. Anil Kumar and Madhav V. Marathe and Srinivasan
Parthasarathy and Aravind Srinivasan",
title = "Algorithmic aspects of capacity in wireless networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "133--144",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064228",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers two inter-related questions: (i)
Given a wireless ad-hoc network and a collection of
source-destination pairs $ (s_i, t_i) $, what is the
maximum throughput capacity of the network, i.e., the
rate at which data from the sources to their
corresponding destinations can be transferred in the
network? (ii) Can network protocols be designed that
jointly route the packets and schedule transmissions at
rates close to the maximum throughput capacity? Much of
the earlier work focused on random instances and proved
analytical lower and upper bounds on the maximum
throughput capacity. Here, in contrast, we consider
arbitrary wireless networks. Further, we study the
algorithmic aspects of the above questions: the goal is
to design provably good algorithms for arbitrary
instances. We develop analytical performance evaluation
models and distributed algorithms for routing and
scheduling which incorporate fairness, energy and
dilation (path-length) requirements and provide a
unified framework for utilizing the network close to
its maximum throughput capacity. Motivated by certain
popular wireless protocols used in practice, we also
explore `shortest-path like' path selection strategies
which maximize the network throughput. The theoretical
results naturally suggest an interesting class of
congestion aware link metrics which can be directly
{\em plugged into\/} several existing routing protocols
such as AODV, DSR, etc. We complement the theoretical
analysis with extensive simulations. The results
indicate that routes obtained using our congestion
aware link metrics consistently yield higher throughput
than hop-count based shortest path metrics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "capacity modeling; end-to-end scheduling; linear
programming; wireless networks",
}
@Article{Chen:2005:EEM,
author = "Zhifeng Chen and Yan Zhang and Yuanyuan Zhou and Heidi
Scott and Berni Schiefer",
title = "Empirical evaluation of multi-level buffer cache
collaboration for storage systems",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "145--156",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064230",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To bridge the increasing processor-disk performance
gap, buffer caches are used in both storage clients
(e.g. database systems) and storage servers to reduce
the number of slow disk accesses. These buffer caches
need to be managed effectively to deliver the
performance commensurate to the aggregate buffer cache
size. To address this problem, two paradigms have been
proposed recently to {\em collaboratively\/} manage
these buffer caches together: the {\em hierarchy-aware
caching\/} maintains the same I/O interface and is
fully transparent to the storage client software, and
the {\em aggressively-collaborative caching\/} trades
off transparency for performance and requires changes
to both the interface and the storage client software.
Before the storage industry starts to implement
collaborative caching in real systems, it is crucial to
find out whether sacrificing transparency is really
worthwhile, i.e., how much can we gain by using the
aggressively-collaborative caching instead of the
hierarchy-aware caching? To accurately answer this
question, it is necessary to consider all possible
combinations of recently proposed local replacement
algorithms and optimization techniques in both
collaboration paradigms. Our study provides an
empirical evaluation to address the above questions.
Particularly, we have compared three
aggressively-collaborative approaches with two
hierarchy-aware approaches for four different types of
database/file I/O workloads using traces collected from
real commercial systems such as IBM DB2. More
importantly, we separate the effects of collaborative
caching from local replacement algorithms and
optimizations, and uniformly apply several recently
proposed local replacement algorithms and optimizations
to all five collaboration approaches. When appropriate
local optimizations and replacement algorithms are
uniformly applied to both hierarchy-aware and
aggressively-collaborative caching, the results
indicate that hierarchy-aware caching can deliver
similar performance as aggressively-collaborative
caching. The results show that the
aggressively-collaborative caching only provides less
than 2.5\% performance improvement on average in
simulation and 1.0\% in real system experiments over
the hierarchy-aware caching for most workloads and
cache configurations. Our sensitivity study indicates
that the performance gain of aggressively-collaborative
caching is also very small for various storage networks
and different cache configurations. Therefore,
considering its simplicity and generality,
hierarchy-aware caching is more feasible than
aggressively-collaborative caching.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "collaborative caching; database; file system; storage
system",
}
@Article{Butt:2005:PIK,
author = "Ali R. Butt and Chris Gniady and Y. Charlie Hu",
title = "The performance impact of kernel prefetching on buffer
cache replacement algorithms",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "157--168",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064231",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A fundamental challenge in improving the file system
performance is to design effective block replacement
algorithms to minimize buffer cache misses. Despite the
well-known interactions between prefetching and
caching, almost all buffer cache replacement algorithms
have been proposed and studied comparatively without
taking into account file system prefetching which
exists in all modern operating systems. This paper
shows that such kernel prefetching can have a
significant impact on the relative performance in terms
of the number of actual disk I/Os of many well-known
replacement algorithms; it can not only narrow the
performance gap but also change the relative
performance benefits of different algorithms. These
results demonstrate the importance for buffer caching
research to take file system prefetching into
consideration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "buffer caching; prefetching; replacement algorithms",
}
@Article{Berg:2005:FDL,
author = "Erik Berg and Erik Hagersten",
title = "Fast data-locality profiling of native execution",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "169--180",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064232",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance tools based on hardware counters can
efficiently profile the cache behavior of an
application and help software developers improve its
cache utilization. Simulator-based tools can
potentially provide more insights and flexibility and
model many different cache configurations, but have the
drawback of large run-time overhead. We present
StatCache, a performance tool based on a statistical
cache model. It has a small run-time overhead while
providing much of the flexibility of simulator-based
tools. A monitor process running in the background
collects sparse memory access statistics about the
analyzed application running natively on a host
computer. Generic locality information is derived and
presented in a code-centric and/or data-centric view.
We evaluate the accuracy and performance of the tool
using ten SPEC CPU2000 benchmarks. We also exemplify
how the flexibility of the tool can be used to better
understand the characteristics of cache-related
performance problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache behavior; profiling tool",
}
@Article{Yotov:2005:AMM,
author = "Kamen Yotov and Keshav Pingali and Paul Stodghill",
title = "Automatic measurement of memory hierarchy parameters",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "181--192",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064233",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The running time of many applications is dominated by
the cost of memory operations. To optimize such
applications for a given platform, it is necessary to
have a detailed knowledge of the memory hierarchy
parameters of that platform. In practice, this
information is poorly documented, if at all. Moreover,
there is growing interest in self-tuning, autonomic
software systems that can optimize themselves for
different platforms; these systems must determine
memory hierarchy parameters automatically without human
intervention. One solution is to use micro-benchmarks
to determine the parameters of the memory hierarchy. In
this paper, we argue that existing micro-benchmarks are
inadequate, and present novel micro-benchmarks for
determining parameters of all levels of the memory
hierarchy, including registers, all data caches and the
translation look-aside buffer. We have implemented
these micro-benchmarks in a tool called X-Ray that can
be ported easily to new platforms. We present
experimental results that show that X-Ray successfully
determines memory hierarchy parameters on current
platforms, and compare its accuracy with that of
existing tools.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "autonomic systems; caches; hardware parameters;
measurement; memory hierarchy; micro-benchmarks;
optimization; self-tuning",
}
@Article{Jonckheere:2005:OIR,
author = "M. Jonckheere and J. Virtamo",
title = "Optimal insensitive routing and bandwidth sharing in
simple data networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "193--204",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064235",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many communication systems can be efficiently modelled
using queueing networks with a stationary distribution
that is insensitive to detailed traffic characteristics
and depends on arrival rates and mean service
requirements only. This robustness enables simple
engineering rules and is thus of considerable practical
interest. In this paper we extend previous results by
relaxing the usual assumption of static routing and
balanced service rates to account for both dynamic
capacity allocation and dynamic load balancing. This
relaxation is necessary to model systems like grid
computing, for instance. Our results identify joint
dynamic allocation and routing policies for single
input reversible networks that are optimal for a wide
range of performance metrics. A simple two-pass
algorithm is presented for finding the optimal policy.
The derived analytical results are applied in a number
of simple numerical examples that illustrate their
modelling potential.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bandwidth allocation; insensitivity; joint
optimization; routing",
}
@Article{Wierman:2005:NIB,
author = "Adam Wierman and Mor Harchol-Balter and Takayuki
Osogami",
title = "Nearly insensitive bounds on {SMART} scheduling",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "205--216",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064236",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We define the class of SMART scheduling policies.
These are policies that bias towards jobs with small
remaining service times, jobs with small original
sizes, or both, with the motivation of minimizing mean
response time and/or mean slowdown. Examples of SMART
policies include PSJF, SRPT, and hybrid policies such
as RS (which biases according to the product of the
remaining size and the original size of a job). For many
policies in the SMART class, the mean response time and
mean slowdown are not known or have complex
representations involving multiple nested integrals,
making evaluation difficult. In this work, we prove
three main results. First, for all policies in the
SMART class, we prove simple upper and lower bounds on
mean response time. Second, we show that all policies
in the SMART class, surprisingly, have very similar
mean response times. Third, we show that the response
times of SMART policies are largely insensitive to the
variability of the job size distribution. In
particular, we focus on the SRPT and PSJF policies and
prove insensitive bounds in these cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "M/G/1; preemptive shortest job first; processor
sharing; PS; PSJF; response time; scheduling; shortest
remaining processing time; SMART; SRPT",
}
@Article{Kortebi:2005:ENA,
author = "A. Kortebi and L. Muscariello and S. Oueslati and J.
Roberts",
title = "Evaluating the number of active flows in a scheduler
realizing fair statistical bandwidth sharing",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "217--228",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064237",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite its well-known advantages, per-flow fair
queueing has not been deployed in the Internet mainly
because of the common belief that such scheduling is
not scalable. The objective of the present paper is to
demonstrate using trace simulations and analytical
evaluations that this belief is misguided. We show that
although the number of flows {\em in progress\/}
increases with link speed, the number that needs
scheduling at any moment is largely independent of this
rate. The number of such {\em active\/} flows is a
random process typically measured in hundreds even
though there may be tens of thousands of flows in
progress. The simulations are performed using traces
from commercial and research networks with quite
different traffic characteristics. Analysis is based on
models for balanced fair statistical bandwidth sharing
and applies properties of queue busy periods to explain
the observed behaviour.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical traffic model; fair queueing; statistical
bandwidth sharing; trace simulations",
}
@Article{Wierman:2005:CSP,
author = "Adam Wierman and Mor Harchol-Balter",
title = "Classifying scheduling policies with respect to higher
moments of conditional response time",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "229--240",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In addition to providing small mean response times,
modern applications seek to provide users predictable
service and, in some cases, Quality of Service (QoS)
guarantees. In order to understand the predictability
of response times under a range of scheduling policies,
we study the conditional variance in response times
seen by jobs of different sizes. We define a metric and
a criterion that distinguish between contrasting
functional behaviors of conditional variance, and we
then classify large groups of scheduling policies. In
addition to studying the conditional variance of
response times, we also derive metrics appropriate for
comparing higher conditional moments of response time
across job sizes. We illustrate that common statistics
such as raw and central moments are not appropriate
when comparing higher conditional moments of response
time. Instead, we find that cumulant moments should be
used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cumulants; FB; foreground-background; LAS; least
attained service; M/G/1; predictability; processor
sharing; PS; PSJF; response time; scheduling; SET;
shortest job first; shortest remaining processing time;
SRPT; variance",
}
@Article{Jiang:2005:WIT,
author = "Hao Jiang and Constantinos Dovrolis",
title = "Why is the {Internet} traffic bursty in short time
scales?",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "241--252",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064240",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Internet traffic exhibits multifaceted burstiness and
correlation structure over a wide span of time scales.
Previous work analyzed this structure in terms of
heavy-tailed session characteristics, as well as TCP
timeouts and congestion avoidance, in relatively long
time scales. We focus on shorter scales, typically less
than 100--1000 milliseconds. Our objective is to
identify the actual mechanisms that are responsible for
creating bursty traffic in those scales. We show that
TCP self-clocking, jointly with queueing in the network,
can shape the packet interarrivals of a TCP connection
in a two-level ON-OFF pattern. This structure creates
strong correlations and burstiness in time scales that
extend up to the Round-Trip Time (RTT) of the
connection. This effect is more important for bulk
transfers that have a large bandwidth-delay product
relative to their window size. Also, the aggregation of
many flows, without rescaling their packet
interarrivals, does not converge to a Poisson stream,
as one might expect from classical superposition
results. Instead, the burstiness in those scales can be
significantly reduced by TCP pacing. In particular, we
focus on the importance of the minimum pacing timer,
and show that a 10-millisecond timer would be too
coarse for removing short-scale traffic burstiness,
while a 1-millisecond timer would be sufficient to make
the traffic almost as smooth as a Poisson stream in
sub-RTT scales.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "burstiness; ON-OFF model; TCP pacing; TCP
self-clocking; traffic modeling; wavelet-based
multiresolution analysis",
}
@Article{Roughan:2005:FBA,
author = "Matthew Roughan",
title = "Fundamental bounds on the accuracy of network
performance measurements",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "253--264",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064241",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers the basic problem of `how
accurate can we make Internet performance
measurements'. The answer is somewhat counter-intuitive
in that there are bounds on the accuracy of such
measurements, no matter how many probes we can use in a
given time interval, and thus arises a type of
Heisenberg inequality describing the bounds in our
knowledge of the performance of a network. The results
stem from the fact that we cannot make independent
measurements of a system's performance: all such
measures are correlated, and these correlations reduce
the efficacy of measurements. The degree of correlation
is also strongly dependent on system load. The result
has important practical implications that reach beyond
the design of Internet measurement experiments, into
the design of network protocols.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "error estimation; Internet measurement; load
balancing; measurement planning; network performance",
}
@Article{Jain:2005:EEE,
author = "Manish Jain and Constantinos Dovrolis",
title = "End-to-end estimation of the available bandwidth
variation range",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "265--276",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064242",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The available bandwidth (avail-bw) of a network path
is an important performance metric and its end-to-end
estimation has recently received significant attention.
Previous work focused on the estimation of the average
avail-bw, ignoring the significant variability of this
metric in different time scales. In this paper, we show
how to estimate a given percentile of the avail-bw
distribution at a user-specified time scale. If two
estimated percentiles cover the bulk of the
distribution (say 10\% to 90\%), the user can obtain a
practical estimate for the avail-bw variation range. We
present two estimation techniques. The first is
iterative and non-parametric, meaning that it is more
appropriate for very short time scales (typically less
than 100 ms), or in bottlenecks with limited flow
multiplexing (where the avail-bw distribution may be
non-Gaussian). The second technique is parametric,
because it assumes that the avail-bw follows the
Gaussian distribution, and it can produce an estimate
faster because it is not iterative. The two techniques
have been implemented in a measurement tool called
Pathvar. Pathvar can track the avail-bw variation range
within 10--20\%, even under non-stationary conditions.
Finally, we identify four factors that play a crucial
role in the variation range of the avail-bw: traffic
load, number of competing flows, rate of competing
flows, and of course the measurement time scale.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active measurement; bandwidth estimation; network
measurement tools; Pathvar; traffic variability",
}
@Article{Chiang:2005:NUM,
author = "Mung Chiang and J. W. Lee and R. Calderbank and D.
Palomar and M. Fazel",
title = "Network utility maximization with nonconcave, coupled,
and reliability-based utilities",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "277--277",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064246",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network Utility Maximization (NUM) has significantly
extended the classical network flow problem and
provided an emerging framework to design resource
allocation algorithms such as TCP congestion control
and to understand layering as optimization
decomposition. We present a summary of very recent
results in the theory and applications of NUM. We show
new distributed algorithms that converge to the
globally optimal rate allocation for NUM problems with
nonconcave utility functions representing inelastic
flows, with coupled utility functions representing
interference effects or hybrid social-selfish
utilities, and with rate-reliability tradeoff through
adaptive channel coding in the physical layer. We
conclude by discussing how different decompositions
of a generalized NUM problem correspond to different
layering architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiang:2005:OCC,
author = "Mung Chiang and Steven Low",
title = "Optimization and control of communication networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "277--277",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064244",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, there has been a surge in research
activities that utilize the power of recent
developments in nonlinear optimization to tackle a wide
scope of work in the analysis and design of
communication systems, touching every layer of the
layered network architecture, and resulting in both
intellectual and practical impacts significantly beyond
the earlier frameworks. These research activities are
driven by both new demands in the areas of
communications and networking, and new tools emerging
from optimization theory. Such tools include new
developments of powerful theories and highly efficient
computational algorithms for nonlinear convex
optimization, as well as global solution methods and
relaxation techniques for nonconvex optimization.
Optimization theory can be used to analyze, interpret,
or design a communication system, for both
forward-engineering and reverse-engineering. Over the
last few years, it has been successfully applied to a
wide range of communication systems, from the high
speed Internet core to wireless networks, from coding
and equalization to broadband access, and from
information theory to network topology models. Some of
the theoretical advances have also been put into
practice and started making visible impacts, including
new versions of TCP congestion control, power control
and scheduling algorithms in wireless networks, and
spectrum management in DSL broadband access networks.
Under the theme of optimization and control of
communication networks, this Hot Topic Session consists
of five invited talks covering a wide range of issues,
including protocols, pricing, resource allocation,
cross layer design, traffic engineering in the
Internet, optical transport networks, and wireless
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Low:2005:OMI,
author = "Steven Low and J. Doyle and L. Li and A. Tang and J.
Wang",
title = "Optimization model of {Internet} protocols",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "277--277",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064245",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Layered architecture is one of the most fundamental
and influential structures of network design. Can we
integrate the various protocol layers into a single
coherent theory by regarding them as carrying out an
asynchronous distributed primal-dual computation over
the network to implicitly solve a global optimization
problem? Different layers iterate on different subsets
of the decision variables using local information to
achieve individual optimalities, but taken together,
these local algorithms attempt to achieve a global
objective. Such a theory will expose the
interconnection between protocol layers and can be used
to study rigorously the performance tradeoff in
protocol layering as different ways to distribute a
centralized computation. In this talk, we describe some
preliminary work towards this goal and discuss some of
the difficulties of this approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitra:2005:JPN,
author = "Debasis Mitra",
title = "Joint pricing-network design and stochastic traffic
engineering to manage demand uncertainty",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "278--278",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064247",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "I will describe two networking models, together with
their optimization techniques, that span several time
scales. In the longest time scale, where the goal is
capacity planning, I will describe the work of
Bienstock, Raskina, Saniee and Wang that considers
joint pricing and network design of optical transport
networks. Technological innovations are yielding
sharply decreasing unit costs. There is also empirical
evidence that suggests that the elasticity of bandwidth
demand to price is high. Integrating these features in
a unified profit-maximizing model leads to a
large-scale nonlinear optimization problem. In this
work, efficient solution techniques are developed to
maximize the carrier's net present value with respect
to pricing strategies and investment decisions for
technology acquisitions. In the work of Mitra and Wang
the time scale is shorter, the network infrastructure
is fixed, and a model for stochastic traffic
engineering is given in which the optimization is with
respect to bandwidth provisioning and route selection.
Traffic demands are uncertain, and the objective is to
maximize a risk-adjusted measure of network revenue
that is generated by serving demands. Considerable
attention is given to the appropriate measure of risk
in the network model. Risk-mitigation strategies are
also advanced. The optimization model, which is based
on mean-risk analysis, enables a service provider to
maximize a combined measure of mean revenue and revenue
risk. The conditions under which the optimization
problem is an instance of convex programming are
obtained. The solution is shown to satisfy the
stochastic efficiency criterion asymptotically. The
efficient frontier, which is the set of Pareto optimal
pairs of mean revenue and revenue risk, is obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Musacchio:2005:AFR,
author = "John Musacchio and Jean Walrand",
title = "Achieving fair rates with ingress policing",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "278--278",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064249",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a simple ingress policing scheme for a
stochastic queuing network that uses a round-robin
service discipline, and derive conditions under which
the flow rates approach a max-min fair share
allocation. The scheme works as follows: Whenever any
of a flow's queues exceeds a policing threshold, the
network discards that flow's arriving packets at the
network ingress, and does so until all of that flow's
queues fall below their thresholds. To prove our
results, we use previously known results relating the
stability of a queuing system to the stability of its
fluid limit and extend these results to relate the flow
rates of the stochastic system to those of a
corresponding fluid model. In particular, we consider
the fluid limit of a sequence of queuing networks with
increasing thresholds. Using a Lyapunov function
derived from the fluid limits, we find that as the
policing thresholds are increased the state of the
stochastic system is attracted to a relatively smaller
and smaller neighborhood surrounding the equilibrium of
the fluid model. We then show how this property implies
that the achieved flow rates approach the max-min rates
predicted by the fluid model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shroff:2005:OBA,
author = "Ness Shroff and Xiaojun Lin",
title = "An optimization based approach for cross-layer design
in wireless communication networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "278--278",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064248",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this talk we study the issue of cross-layer design
for rate control in multihop wireless networks. We have
developed an optimal cross-layered rate control scheme
that jointly computes both the rate allocation and the
stabilizing schedule that controls the resources at the
underlying layers. However, the scheduling component in
this optimal cross-layered rate control scheme has to
solve a complex global optimization problem at each
time, and is hence too computationally expensive for
online implementation. Thus, we study the impact on the
performance of cross-layer rate control if the network
can only use an imperfect (and potentially distributed)
scheduling component that is easier to implement. We
study scenarios both with a fixed number of users and
with a number of users that changes due to arrivals and
departures in the system. In each case, we establish
desirable results on the performance bounds of
cross-layered rate control with imperfect scheduling.
Our cross-layered approach provides provably better
performance bounds when compared with a layered
approach (that does not design rate control and
scheduling together). The insights drawn from our
analyses also enable us to design a fully distributed
cross-layered rate control and scheduling algorithm
under a restrictive interference model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2005:NSC,
author = "Florin Ciucu and Almut Burchard and J{\"o}rg
Liebeherr",
title = "A network service curve approach for the stochastic
analysis of networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "279--290",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064251",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The stochastic network calculus is an evolving new
methodology for backlog and delay analysis of networks
that can account for statistical multiplexing gain.
This paper advances the stochastic network calculus by
deriving a network service curve, which expresses the
service given to a flow by the network as a whole in
terms of a probabilistic bound. The presented network
service curve permits the calculation of statistical
end-to-end delay and backlog bounds for broad classes
of arrival and service distributions. The benefits of
the derived service curve are illustrated for the
exponentially bounded burstiness (EBB) traffic model.
It is shown that end-to-end performance measures
computed with a network service curve are bounded by $
O(H \log H) $, where $H$ is the number of nodes
traversed by a flow. Using currently available
techniques that compute end-to-end bounds by adding
single node results, the corresponding performance
measures are bounded by $ O(H^3)$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network service curve; quality-of-service; stochastic
network calculus",
}
@Article{Urgaonkar:2005:AMM,
author = "Bhuvan Urgaonkar and Giovanni Pacifici and Prashant
Shenoy and Mike Spreitzer and Asser Tantawi",
title = "An analytical model for multi-tier {Internet} services
and its applications",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "291--302",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064252",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Since many Internet applications employ a multi-tier
architecture, in this paper, we focus on the problem of
analytically modeling the behavior of such
applications. We present a model based on a network of
queues, where the queues represent different tiers of
the application. Our model is sufficiently general to
capture (i) the behavior of tiers with significantly
different performance characteristics and (ii)
application idiosyncrasies such as session-based
workloads, concurrency limits, and caching at
intermediate tiers. We validate our model using real
multi-tier applications running on a Linux server
cluster. Our experiments indicate that our model
faithfully captures the performance of these
applications for a number of workloads and
configurations. For a variety of scenarios, including
those with caching at one of the application tiers, the
average response times predicted by our model were
within the 95\% confidence intervals of the observed
average response times. Our experiments also
demonstrate the utility of the model for dynamic
capacity provisioning, performance prediction,
bottleneck identification, and session policing. In one
scenario, where the request arrival rate increased from
less than 1500 to nearly 4200 requests/min, a dynamic
provisioning technique employing our model was able to
maintain response time targets by increasing the
capacity of two of the application tiers by factors of
2 and 3.5, respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet application; MVA algorithm; queuing model",
}
@Article{Chen:2005:MSE,
author = "Yiyu Chen and Amitayu Das and Wubi Qin and Anand
Sivasubramaniam and Qian Wang and Natarajan Gautam",
title = "Managing server energy and operational costs in
hosting centers",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "303--314",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064253",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The growing cost of tuning and managing computer
systems is leading to out-sourcing of commercial
services to hosting centers. These centers provision
thousands of dense servers within a relatively small
real-estate in order to host the applications/services
of different customers who may have been assured by a
service-level agreement (SLA). Power consumption of
these servers is becoming a serious concern in the
design and operation of the hosting centers. The
effects of high power consumption manifest not only in
the costs spent in designing effective cooling systems
to ward off the generated heat, but also in the cost of
electricity consumption itself. It is crucial to deploy
power management strategies in these hosting centers to
lower these costs towards enhancing profitability. At
the same time, techniques for power management that
include shutting down these servers and/or modulating
their operational speed, can impact the ability of the
hosting center to meet SLAs. In addition, repeated
on-off cycles can increase the wear-and-tear of server
components, incurring costs for their procurement and
replacement. This paper presents a formalization of this
problem and proposes three new online solution
strategies based on steady state queuing analysis,
feedback control theory, and a hybrid mechanism
borrowing ideas from these two. Using real web server
traces, we show that these solutions are more adaptive
to workload behavior than earlier heuristics when
performing server provisioning and speed control,
minimizing operational costs while meeting the
SLAs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy management; feedback control; performance
modeling; server provisioning",
}
@Article{Ruan:2005:EIS,
author = "Yaoping Ruan and Vivek S. Pai and Erich Nahum and John
M. Tracey",
title = "Evaluating the impact of simultaneous multithreading
on network servers using real hardware",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "315--326",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064254",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper examines the performance of simultaneous
multithreading (SMT) for network servers using actual
hardware, multiple network server applications, and
several workloads. Using three versions of the Intel
Xeon processor with Hyper-Threading, we perform
macroscopic analysis as well as microarchitectural
measurements to understand the origins of the
performance bottlenecks for SMT processors in these
environments. The results of our evaluation suggest
that the current SMT support in the Xeon is application
and workload sensitive, and may not yield significant
benefits for network servers. In general, we find that
enabling SMT on real hardware usually produces only
slight performance gains, and can sometimes lead to
performance loss. In the uniprocessor case, previous
studies appear to have neglected the OS overhead in
switching from a uniprocessor kernel to an SMT-enabled
kernel. The performance loss associated with such
support is comparable to the gains provided by SMT. In
the 2-way multiprocessor case, the higher number of
memory references from SMT often causes the memory
system to become the bottleneck, offsetting any
processor utilization gains. This effect is compounded
by the growing gap between processor speeds and memory
latency. In trying to understand the large gains shown
by simulation studies, we find that while the general
trends for microarchitectural behavior agree with real
hardware, differences in sizing assumptions and
performance models yield much more optimistic benefits
for SMT than we observe.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network server; simultaneous multithreading(SMT)",
}
@Article{Donnet:2005:EAL,
author = "Benoit Donnet and Philippe Raoult and Timur Friedman
and Mark Crovella",
title = "Efficient algorithms for large-scale topology
discovery",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "327--338",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064256",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There is a growing interest in discovery of internet
topology at the interface level. A new generation of
highly distributed measurement systems is currently
being deployed. Unfortunately, the research community
has not examined the problem of how to perform such
measurements efficiently and in a network-friendly
manner. In this paper we make two contributions toward
that end. First, we show that standard topology
discovery methods (e.g., skitter) are quite
inefficient, repeatedly probing the same interfaces.
This is a concern, because when scaled up, such methods
will generate so much traffic that they will begin to
resemble DDoS attacks. We measure two kinds of
redundancy in probing (intra- and inter-monitor) and
show that both kinds are important. We show that
straightforward approaches to addressing these two
kinds of redundancy must take opposite tacks, and are
thus fundamentally in conflict. Our second contribution
is to propose and evaluate Doubletree, an algorithm
that reduces both types of redundancy simultaneously on
routers and end systems. The key ideas are to exploit
the tree-like structure of routes to and from a single
point in order to guide when to stop probing, and to
probe each path by starting near its midpoint. Our
results show that Doubletree can reduce both types of
measurement load on the network dramatically, while
permitting discovery of nearly the same set of nodes
and links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cooperative systems; network topology; traceroutes",
}
@Article{Mao:2005:LPI,
author = "Z. Morley Mao and Lili Qiu and Jia Wang and Yin
Zhang",
title = "On {AS}-level path inference",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "339--349",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064257",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ability to discover the AS-level path between two
end-points is valuable for network diagnosis,
performance optimization, and reliability enhancement.
Virtually all existing techniques and tools for path
discovery require direct access to the source. However,
the uncooperative nature of the Internet makes it
difficult to get direct access to any remote end-point.
Path inference becomes challenging when we have no
access to the source or the destination. Moreover even
when we have access to the source and know the forward
path, it is nontrivial to infer the reverse path, since
Internet routing is often asymmetric. In this
paper, we explore the feasibility of AS-level path
inference without direct access to either end-point.
We describe {\em RouteScope\/} --- a tool for inferring
AS-level paths by finding the shortest policy paths in
an AS graph obtained from BGP tables collected from
multiple vantage points. We identify two main factors
that affect the path inference accuracy: the accuracy
of AS relationship inference and the ability to
determine the first AS hop. To address the issues, we
propose two novel techniques: a new AS relationship
inference algorithm, and a novel scheme to infer the
first AS hop by exploiting the TTL information in IP
packets. We evaluate the effectiveness of {\em
RouteScope\/} using both BGP tables and the AS paths
collected from public BGP gateways. Our results show
that it achieves 70\%--88\% accuracy in path
inference.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "AS-level path; border gateway protocol; internet
routing; network topology",
}
@Article{Zhao:2005:DSA,
author = "Qi (George) Zhao and Abhishek Kumar and Jia Wang and
Jun (Jim) Xu",
title = "Data streaming algorithms for accurate and efficient
measurement of traffic and flow matrices",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "350--361",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064258",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The traffic volume between origin/destination (OD)
pairs in a network, known as traffic matrix, is
essential for efficient network provisioning and
traffic engineering. Existing approaches of estimating
the traffic matrix, based on statistical inference
and/or packet sampling, usually cannot achieve very
high estimation accuracy. In this work, we take a
brand-new approach to attacking this problem. We propose a
novel data streaming algorithm that can process a traffic
stream at very high speed (e.g., 40 Gbps) and produce
traffic digests that are orders of magnitude smaller
than the traffic stream. By correlating the digests
collected at any OD pair using Bayesian statistics, the
volume of traffic flowing between the OD pair can be
accurately determined. We also establish principles and
techniques for optimally combining this streaming
method with sampling, when sampling is necessary due to
stringent resource constraints. In addition, we propose
another data streaming algorithm that estimates {\em
flow matrix}, a finer-grained characterization than
traffic matrix. Flow matrix is concerned with not only
the total traffic between an OD pair (traffic matrix),
but also how it splits into flows of various sizes.
Through rigorous theoretical analysis and extensive
synthetic experiments on real Internet traffic, we
demonstrate that these two algorithms can produce very
accurate estimates of the traffic matrix and flow matrix,
respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; network measurement; sampling;
statistical inference; traffic matrix",
}
@Article{Soule:2005:TMB,
author = "Augustin Soule and Anukool Lakhina and Nina Taft and
Konstantina Papagiannaki and Kave Salamatian and
Antonio Nucci and Mark Crovella and Christophe Diot",
title = "Traffic matrices: balancing measurements, inference
and modeling",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "362--373",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064259",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic matrix estimation is well-studied, but in
general has been treated simply as a statistical
inference problem. In practice, however, network
operators seeking traffic matrix information have a
range of options available to them. Operators can
measure traffic flows directly; they can perform
partial flow measurement, and infer missing data using
models; or they can perform no flow measurement and
infer traffic matrices directly from link counts. The
advent of practical flow measurement makes the study of
these tradeoffs more important. In particular, an
important question is whether judicious modeling,
combined with partial flow measurement, can provide
traffic matrix estimates that are significantly better
than previous methods at relatively low cost. In this
paper we make a number of contributions toward
answering this question. First, we provide a taxonomy
of the kinds of models that may make use of partial
flow measurement, based on the nature of the
measurements used and the spatial, temporal, or
spatio-temporal correlation exploited. We then evaluate
estimation methods which use each kind of model. In the
process we propose and evaluate new methods, and
extensions to methods previously proposed. We show
that, using such methods, small amounts of traffic flow
measurements can have significant impacts on the
accuracy of traffic matrix estimation, yielding results
much better than previous approaches. We also show that
different methods differ in their bias and variance
properties, suggesting that different methods may be
suited to different applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Internet traffic matrix estimation; Kalman filtering;
principal components analysis; statistical inference;
traffic characterization",
}
@Article{Ganeriwal:2005:RAT,
author = "Saurabh Ganeriwal and Deepak Ganesanl and Mark Hansen
and Mani B. Srivastava and Deborah Estrin",
title = "Rate-adaptive time synchronization for long-lived
sensor networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "374--375",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064261",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Time synchronization is critical to sensor networks at
many layers of their design and enables better
duty-cycling of the radio, accurate localization,
beamforming and other collaborative signal processing.
While there has been significant work in sensor network
synchronization, measurement based studies have been
restricted to very short-term (few minutes) datasets
and have focused on obtaining accurate instantaneous
synchronization. Long-term synchronization has
typically been handled by periodic re-synchronization
schemes with beacon intervals of a few minutes based on
the assumption that long-term drift is too hard to
model and predict. Thus, none of this work exploits the
temporally correlated behavior of the clock drift. Yet,
there are incredible energy gains to be achieved from
better modeling and prediction of long-term drift that
can provide bounds on long-term synchronization error
across a sensor network. Better synchronization can
lead to significantly lower duty-cycles of the radio,
simplify signal processing and can enable an order of
magnitude greater lifetime than current techniques. We
measure, evaluate and analyze in-depth the long-term
behavior of synchronization skew and drift on typical
Mica sensor nodes and develop an efficient long-term
time synchronization protocol. We use four real time
data sets gathered over periods of 12--30 hours in
different environmental conditions to study the
interplay between three key parameters that influence
long-term synchronization --- synchronization rate,
history of past synchronization beacons and the
estimation scheme. We use this measurement-based study
to design an online adaptive time-synchronization
algorithm that can adapt to changing clock drift and
environmental conditions while achieving
application-specified precision with very high
probability. We find that our algorithm achieves
between one and two orders of magnitude improvement in
energy efficiency over currently available
time-synchronization approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clock drift; sensor networks; time synchronization",
}
@Article{Wang:2005:IPS,
author = "An-I A. Wang and Peter Reiher and Geoff Kuenning",
title = "Introducing permuted states for analyzing conflict
rates in optimistic replication",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "376--377",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064262",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical modeling; conflict rates; optimistic
replication; permuted states; simulation",
}
@Article{Mickens:2005:PNA,
author = "James W. Mickens and Brian D. Noble",
title = "Predicting node availability in peer-to-peer
networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "378--379",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064263",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Unlike the well-administered servers in traditional
distributed systems, machines in peer-to-peer networks
have widely varying levels of availability. Accurate
modeling of node uptime is crucial for predicting
per-machine resource burdens and selecting appropriate
data replication strategies. In this research project,
we improve upon the accuracy of previous peer-to-peer
availability models, which are often too conservative
to dynamically predict system availability at a
fine-grained level. We test our predictors on
availability traces from the PlanetLab distributed test
bed and the Microsoft corporate network. Each trace has
a distinct predictability profile, and we explain these
differences by examining the fundamental uptime classes
contained in each trace. We also show how
availability-guided replica placement reduces the
amount of object copying in a distributed data store.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "availability prediction; data availability;
distributed object stores; distributed system
simulation; machine availability",
}
@Article{Qiu:2005:TMW,
author = "Lili Qiu and Paramvir Bahl and Ananth Rao and Lidong
Zhou",
title = "Troubleshooting multihop wireless networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "380--381",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064264",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Effective network troubleshooting is critical for
maintaining efficient and reliable network operation.
Troubleshooting is especially challenging in multihop
wireless networks because the behavior of such networks
depends on complicated interactions between many
unpredictable factors such as RF noise, signal
propagation, node interference, and traffic flows. In
this paper we propose a new direction for research on
fault diagnosis in wireless networks. Specifically, we
present a diagnostic system that employs trace-driven
simulations to detect faults and perform root cause
analysis. We apply this approach to diagnose
performance problems caused by packet dropping, link
congestion, external noise, and MAC misbehavior. In a
25-node multihop wireless network, we are able to
diagnose over 10 simultaneous faults of multiple types
with more than 80\% coverage. Our framework is general
enough for a wide variety of wireless and wired
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multihop wireless networks; network diagnosis; network
management; simulation",
}
@Article{Raz:2005:FOM,
author = "David Raz and Benjamin Avi-Itzhak and Hanoch Levy",
title = "Fair operation of multi-server and multi-queue
systems",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "382--383",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064265",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work aims at studying the fairness of multi-queue
and multi-server queueing systems. We deal with the
issues of queue-multiplicity, queue joining policy and
queue jockeying and use a quantitative measure (RAQFM)
to evaluate them. Our results yield the relative
fairness of the mechanisms as a function of the system
configuration and parameters. Practitioners can use
these results to {\em quantitatively\/} account for
system fairness and to weigh efficiency aspects versus
fairness aspects in designing and controlling their
queueing systems. In particular, we quantitatively
demonstrate that: (1) Joining the shortest queue
increases fairness, (2) A single `combined' queue
system is more fair than a `separate' (multi) queue
system, and (3) Jockeying from the head of a queue is
more fair than jockeying from its tail.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fairness; FCFS; job scheduling; multi-queue;
multi-server; resource allocation; unfairness",
}
@Article{Anderson:2005:DSA,
author = "Eric Anderson and Dirk Beyer and Kamalika Chaudhuri
and Terence Kelly and Norman Salazar and Cipriano
Santos and Ram Swaminathan and Robert Tarjan and Janet
Wiener and Yunhong Zhou",
title = "Deadline scheduling for animation rendering",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "384--385",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064266",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "animation rendering; deadline scheduling; simulation",
}
@Article{He:2005:SSP,
author = "Simin He and Shutao Sun and Wei Zhao and Yanfeng Zheng
and Wen Gao",
title = "Smooth switching problem in buffered crossbar
switches",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "386--387",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064267",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalability considerations drive the switch fabric
design to evolve from output queueing to input queueing
and further to combined input and crosspoint queueing
(CICQ). However, few CICQ switches are known with
guaranteed quality of service, and credit-based flow
control induces a scalability bottleneck. In this
paper, we propose a novel CICQ switch called the
smoothed buffered crossbar or sBUX, based on a new
design objective of smoothness and on a new rate-based
flow control scheme called the smoothed multiplexer or
sMUX. It is proved that with a buffer of just four
cells at each crosspoint, sBUX can utilize 100\% of the
switch capacity to provide deterministic guarantees of
bandwidth and fairness, as well as delay and jitter bounds, for
each flow. In particular, neither credit-based flow
control nor speedup is used, and arbitrary
fabric-internal latency is allowed between line cards
and the switch core.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "buffered crossbar; CICQ; scheduling; smoothness;
switch",
}
@Article{He:2005:PTT,
author = "Qi He and Constantinos Dovrolis and Mostafa Ammar",
title = "Prediction of {TCP} throughput: formula-based and
history-based methods",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "388--389",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064268",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chua:2005:SFE,
author = "David Chua and Eric D. Kolaczyk and Mark Crovella",
title = "A statistical framework for efficient monitoring of
end-to-end network properties",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "390--391",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064269",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network service providers and customers are often
concerned with aggregate performance measures that span
multiple network paths. Unfortunately, forming such
network-wide measures can be difficult, due to the
issues of scale involved. As a result, it is of
interest to explore the feasibility of methods that
dramatically reduce the number of paths measured in
such situations while maintaining acceptable accuracy.
In previous work [4] we have proposed a statistical
framework for efficiently addressing this problem. The
key to our method lies in the observation and
exploitation of the fact that network paths show
significant redundancy (sharing of common links). We now
make three contributions in [3]: (1) we generalize the
framework to make it more immediately applicable to
network measurements encountered in practice; (2) we
demonstrate that the observed path redundancy upon
which our method is based is robust to variation in key
network conditions and characteristics, including the
presence of link failures; and (3) we show how the
framework may be applied to address three practical
problems of interest to network providers and
customers, using data from an operating network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "algorithms; networking; statistical analysis",
}
@Article{Zhu:2005:TSA,
author = "Ningning Zhu and Jiawu Chen and Tzi-cker Chiueh and
Daniel Ellard",
title = "{TBBT}: scalable and accurate trace replay for file
server evaluation",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "392--393",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064270",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "aging; benchmarks; file system evaluation; NFS; trace
play",
}
@Article{Sarat:2005:UAD,
author = "Sandeep Sarat and Vasileios Pappas and Andreas
Terzis",
title = "On the use of anycast in {DNS}",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "394--395",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064271",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the initial results from our evaluation
study on the performance implications of anycast in
DNS, using four anycast servers deployed at top-level
DNS zones. Our results show that 15\% to 55\% of the
queries sent to an anycast group are answered by the
topologically closest server and at least 10\% of the
queries experience an additional delay on the order of
100 ms. While increased availability is one of the
supposed advantages of anycast, we found that outages
can last up to multiple minutes, mainly due to slow BGP
convergence. On the other hand, the number of outages
observed was fairly small, suggesting that anycast
provides a generally stable service.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mudigonda:2005:MMA,
author = "Jayaram Mudigonda and Harrick M. Vin and Raj
Yavatkar",
title = "Managing memory access latency in packet processing",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "396--397",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064272",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this study, we refute the popular belief [1,2] that
packet processing does not benefit from data-caching.
We show that a small data-cache of 8KB can bring down
the packet processing time by much as 50-90\%, while
reducing the off-chip memory bandwidth usage by about
60-95\%. We also show that, unlike general-purpose
computing, packet processing, due to its
memory-intensive nature, cannot rely exclusively on
data-caching to eliminate the memory bottleneck
completely.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data-caches; multithreading; network processors",
}
@Article{Bharambe:2005:SOB,
author = "Ashwin R. Bharambe and Cormac Herley and Venkata N.
Padmanabhan",
title = "Some observations on {BitTorrent} performance",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "398--399",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064273",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present a simulation-based study of
BitTorrent. Our results confirm that BitTorrent
performs near-optimally in terms of uplink bandwidth
utilization and download time, except under certain
extreme conditions. On fairness, however, our work
shows that low bandwidth peers systematically download
more than they upload to the network when high
bandwidth peers are present. We find that the {\em
rate-based\/} tit-for-tat policy is not effective in
preventing unfairness. We show how simple changes to
the tracker and a stricter, {\em block-based
tit-for-tat policy}, greatly improve fairness while
maintaining high utilization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bandwidth utilization; BitTorrent; fairness",
}
@Article{Machiraju:2005:TPC,
author = "Sridhar Machiraju and Darryl Veitch and Fran{\c{c}}ois
Baccelli and Antonio Nucci and Jean C. Bolot",
title = "Theory and practice of cross-traffic estimation",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "400--401",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064274",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Active probing heuristics are usually based on queuing
systems. However, a rigorous probabilistic treatment of
probing methods has been lacking. For instance, it is
not known, even in principle, what can and cannot be
measured in general, nor what the true limitations of
existing methods are. We provide a probabilistic treatment
for the measurement of cross traffic in the 1-hop case.
We derive inversion formulae for the cross traffic
process, and explain their fundamental limits, using an
intuitive geometric framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active probing; cross-traffic estimation",
}
@Article{Stutzbach:2005:CTT,
author = "Daniel Stutzbach and Reza Rejaie",
title = "Characterizing the two-tier {Gnutella} topology",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "402--403",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064275",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Characterizing the properties of peer-to-peer (P2P)
overlay topologies in file-sharing applications is
essential for understanding their impact on the
network, identifying their performance bottlenecks in
practice, and evaluating their performance via
simulation. Such characterization requires accurate
snapshots of the overlay topology, which are difficult to
capture due to their large size and dynamic nature.
Previous studies characterizing overlay topologies not
only are outdated but also rely on partial or
potentially distorted snapshots. In this extended
abstract, we briefly present the first characterization
of two-tier Gnutella topologies based on recent and
accurate snapshots.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Gnutella; peer-to-peer; topology",
}
@Article{Tewari:2005:ASR,
author = "Saurabh Tewari and Leonard Kleinrock",
title = "Analysis of search and replication in unstructured
peer-to-peer networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "404--405",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064276",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates the effect of the number of
file replicas on search performance in unstructured
peer-to-peer networks. We observe that for a search
network with a random graph topology where file
replicas are uniformly distributed, the hop distance to
a replica of a file is logarithmic in the number of
replicas. Using this observation we show that
flooding-based search is optimized when the number of
replicas is proportional to the file request rates.
This replica distribution is also optimal for download
time, and since flooding has logarithmically better
search time than random walk under its optimal replica
distribution, we investigate the query-processing load
using this distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "flooding; optimal file replication; peer-to-peer;
random graphs; replication; search performance;
unstructured networks",
}
@Article{Zhang:2005:ILS,
author = "Jianyong Zhang and Anand Sivasubramaniam and Alma
Riska and Qian Wang and Erik Riedel",
title = "An interposed 2-Level {I/O} scheduling framework for
performance virtualization",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "406--407",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064277",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fairness; I/O scheduling; performance isolation;
quality of service; storage systems; virtualization",
}
@Article{Wenisch:2005:TAM,
author = "Thomas F. Wenisch and Roland E. Wunderlich and Babak
Falsafi and James C. Hoe",
title = "{TurboSMARTS}: accurate microarchitecture simulation
sampling in minutes",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "408--409",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064278",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent research proposes accelerating processor
microarchitecture simulation through statistical
sampling. Prior simulation sampling approaches
construct accurate model state for each measurement by
continuously warming large microarchitectural
structures (e.g., caches and the branch predictor)
while emulating the billions of instructions between
measurements. This approach, called functional warming,
occupies hours of runtime while the detailed simulation
that is measured requires mere minutes. To eliminate
the functional warming bottleneck, we propose
TurboSMARTS, a simulation framework that stores
functionally-warmed state in a library of small,
reusable checkpoints. TurboSMARTS enables the creation
of the thousands of checkpoints necessary for accurate
sampling by storing only the subset of warmed state
accessed during simulation of each brief execution
window. TurboSMARTS matches the accuracy of prior
simulation sampling techniques (i.e., $ \pm $3\% error
with 99.7\% confidence), while estimating the
performance of an 8-way out-of-order superscalar
processor running SPEC CPU2000 in 91 seconds per
benchmark, on average, using a 12 GB checkpoint
library.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "checkpointed microarchitecture simulation; simulation
sampling",
}
@Article{Hu:2005:RCM,
author = "Chunyu Hu and Jennifer C. Hou",
title = "A reactive channel model for expediting wireless
network simulation",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "410--411",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064279",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A major problem with leveraging event-driven,
packet-level simulation environments, such as {\em
ns2\/} [6], {\em J-Sim\/} [1], {\em OpNet\/} [2], and
{\em QualNet\/} [3], in conducting wireless network
simulation is the vast number of events generated, a
majority of which are related to signal transmission in
the PHY/MAC layers. In this extended abstract, we
investigate the operations of signal transmission in
the various stages: {\em signal propagation}, {\em
signal interference}, and {\em interaction with the
PHY/MAC layers}, and identify where events can be
reduced without impairing the accuracy. We propose to
leverage the MAC/PHY state information, and devise
(from the perspective of network simulation) a reactive
channel model (RCM) in which nodes explicitly {\em
register\/} their interests in receiving certain events
according to the MAC/PHY states they are in and the
corresponding operations that should be performed. The
simulation study indicates that RCM yields an
order-of-magnitude speed-up without compromising the accuracy
of simulation results. An advantage of RCM with respect
to the implementation is that there is no need to
re-design the channel model for each specific MAC
layer, and the modification made in the MAC/PHY layers
is quite modest (e.g., a few API changes). This,
coupled with the performance gain, suggests that RCM is
an attractive, light-weight mechanism for expediting
wireless network simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel model; network simulation; reactive;
scalability",
}
@Article{Groenevelt:2005:MDM,
author = "Robin Groenevelt and Philippe Nain and Ger Koole",
title = "Message delay in {MANET}",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "412--413",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064280",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A generic stochastic model with only two input
parameters is introduced to evaluate the message delay
in mobile ad hoc networks (MANETs) where nodes may
relay messages. The Laplace--Stieltjes transform (LST)
of the message delay is obtained for two protocols: the
two-hop and the unrestricted multicopy protocol. From
these results we deduce the expected message delays. It
is shown that, despite its simplicity, the model
accurately predicts the message delay under both relay
strategies for a number of mobility models (the random
waypoint, random direction and the random walker
mobility models).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "delay; estimation; mobile ad hoc; modeling; networks;
performance prediction; statistics",
}
@Article{Squillante:2005:SIW,
author = "Mark S. Squillante",
title = "Special issue on the workshop on {MAthematical
performance Modeling And Analysis (MAMA 2005)}: {Guest
Editor}'s foreword",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "2--2",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101893",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important role
in research studies to investigate fundamental issues
and tradeoffs at the core of performance problems in
the design and implementation of complex computer
systems, networks and applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carofiglio:2005:SPA,
author = "Giovanna Carofiglio and Rossano Gaeta and Michele
Garetto and Paolo Giaccone and Emilio Leonardi and
Matteo Sereno",
title = "A statistical physics approach for modelling {P2P}
systems",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "3--5",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We apply basic concepts of statistical physics to
devise an approximate model describing the dynamics of
content diffusion in large peer-to-peer networks. Our
approach is based on fluid-diffusive equations, whose
solution can be obtained by numerical evaluation with a
complexity independent of the number of users and
contents, thus allowing the analysis of very large systems.
The model is general and modular, and can incorporate
the effect of both search and download processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sundararaj:2005:OPA,
author = "Ananth I. Sundararaj and Manan Sanghi and John R.
Lange and Peter A. Dinda",
title = "An optimization problem in adaptive virtual
environments",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "6--8",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101895",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A virtual execution environment consisting of virtual
machines (VMs) interconnected with virtual networks
provides opportunities to dynamically optimize, at
run-time, the performance of existing, {\em
unmodified\/} distributed applications without any user
or programmer intervention. Along with resource
monitoring and inference and application-independent
adaptation mechanisms, efficient adaptation algorithms
are key to the success of such an effort. In previous
work we have described our measurement and inference
framework, explained our adaptation mechanisms, and
proposed simple heuristics as adaptation algorithms.
Though we were successful in improving performance as
compared to the case with no adaptation, none of our
algorithms were characterized by theoretically proven
bounds. In this paper, we formalize the adaptation
problem, show that it is NP-hard and propose research
directions for coming up with an efficient solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicol:2005:OPC,
author = "David M. Nicol",
title = "Optimized pre-copy calibration of hard drives",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "9--11",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101896",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In certain contexts a small window of time exists
during which law enforcement has access to a hard-drive
suspected of containing important information. Given
legal authority to copy or seize this disk, a decision
must be made whether to use that access time to make a
copy of the disk (which may take more than an hour,
depending on the size of the disk) and leave its owner
unaware that it has been copied. The copying operation
uses especially fast drivers that bypass normal error
correction mechanisms. Therefore, for the copy to be
successful it is necessary that the disk onto which the
copy is placed yield exactly the same bits on
subsequent reads as would the original disk. To gain
confidence that the copy will be successful the copying
software typically chooses some sectors at random,
copies them, and determines whether their copies are
identical to the original. We address the problem of
quantifying the conditional probability that the disk
will copy correctly given that some samples have copied
correctly, as a function of the number and placement
of those samples. Our framework allows us then to
choose the placement of those samples in such a way
that this conditional probability is maximized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kumaran:2005:SAC,
author = "J. Kumaran and K. Mitchell and A. van de Liefvoort",
title = "A spectral approach to compute performance measures in
a correlated single server queue",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "12--14",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101897",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The coupling matrix was introduced in [8] to compute
the performance measures of a PH/PH/1 single server
queue. This matrix was extended in [1, 2] to include
arrival and service processes that are possibly
serially correlated processes, although the service
process remains independent of the arrival process and
all marginal distributions are matrix exponential, and
this current paper is an extended abstract of [2]. The
coupling matrix is constructed from the arrival and the
service distributions without any computational effort,
and the performance measures (such as waiting times and
queue length distributions) are derived directly from
its spectrum.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fiorini:2005:UCS,
author = "Pierre M. Fiorini and Robert Sheahan and Lester
Lipsky",
title = "On unreliable computing systems when heavy-tails
appear as a result of the recovery procedure",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "15--17",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For some computing systems, failure is rare enough
that it can be ignored. In other systems, failure is so
common that how to handle it can have a significant
impact on the performance of the system. There are many
different recovery schemes for tasks, however, they can
be classified into three broad categories: (1) {\em
Resume\/}: when a task fails, it knows exactly where it
stops and can continue at that point when allowed to
resume (i.e., {\em preemptive resume --- (prs)\/}); (2)
{\em Replace\/}: when a task fails, then later when the
processor continues, it begins with a brand new task
(i.e., {\em preemptive repeat different (prd)\/}); and,
(3) {\em Restart\/}: when a task fails it loses all
work done to that point and must start anew upon
continuing later (i.e., {\em preemptive repeat
identical --- pri\/}). In this paper, assuming a
computing system is unreliable, we discuss how {\em
heavy-tail\/} (hereafter referred to as {\em
power-tail\/} --- PT) distributions can appear in a
job's task stream given the {\em Restart\/} recovery
procedure. This is an important consideration since it
is known that power-tails can lead to unstable systems
[4]. We then demonstrate how to obtain performance and
dependability measures for a class of computing systems
comprised of $P$ unreliable processors and a finite
number of tasks $N$ given the above recovery
procedures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2005:MDP,
author = "Qi Zhang and Armin Heindl and Evgenia Smirni",
title = "Models of the departure process of a {BMAP/MAP/1}
queue",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "18--20",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101899",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a family of finite approximations for the
departure process of a BMAP/MAP/1 queue. The departure
process approximations are derived via an exact
aggregate solution technique (called ETAQA) applied to
M/G/1-type Markov processes. The proposed
approximations are indexed by a parameter $n$ ($ n >
1$), which determines the size of the output model as $
n + 1$ block levels of the M/G/1-type process. This
output approximation preserves exactly the marginal
distribution of the true departure process and the lag
correlations of the inter-departure times up to lag $ n
- 2$. Experimental results support the applicability of
the proposed approximation in traffic-based
decomposition of queueing networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramachandran:2005:PBA,
author = "Krishna K. Ramachandran and Biplab Sikdar",
title = "A population based approach to model network lifetime
in wireless sensor networks",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "21--23",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The physical constraints of battery-powered sensors
impose limitations on their processing capacity and
longevity. As battery power in the nodes decays,
certain parts of the network may become disconnected or
the coverage may shrink, thereby reducing the
reliability and the potency of the sensor network.
Since sensor networks operate unattended and without
maintenance, it is imperative that network failures are
detected early enough so that corrective measures can
be taken. Existing research has primarily concentrated
on developing algorithms, be it distributed or
centralized, to optimize network longevity metrics. For
instance, [4, 5] propose MAC layer optimizations to
prolong longevity, while [7, 6] look at the problem
from a Layer 3 perspective. Works along the lines of
actually building network models for energy consumption
are addressed in [2], [3], but these models fail to
capture the interplay between a node's spatial location
and it's energy consumption. In our current work, we
develop an unifying framework to characterize the
lifetime of such energy constrained networks, and
obtain insights into their working. In particular, we
employ a framework similar to population models for
biological systems, to model the network lifetime. We
consider both {\em spatial\/} scenarios, where a node's
power consumption is governed by its position in space
as well as {\em nonspatial\/} scenarios, where the
node's location and power consumption model are
independent entities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kamra:2005:DPS,
author = "Abhinav Kamra and Jon Feldman and Vishal Misra and Dan
Rubenstein",
title = "Data persistence in sensor networks: towards optimal
encoding for data recovery in partial network
failures",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "24--26",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101901",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sensor networks consist of a number of sensors spread
across a geographical area. Each sensor has
communication capability and some level of intelligence
for signal processing and networking of data. Each
sensor node in the network routinely `senses' and
stores data from its immediate environment. An
important requirement of the sensor network is that the
collected data be disseminated to the proper end users.
In some cases, there are fairly strict requirements on
this communication. For example, the detection of an
intruder in a surveillance network should be
immediately communicated to the police authorities.
Each sensor node also has some storage capacity to
store the collected data or to assemble the data prior
to communicating it to another node.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2005:ION,
author = "Wenjie Jiang and John C. S. Lui and Dah-Ming Chiu",
title = "Interaction of overlay networks: properties and
implications",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "27--29",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although the concept of application layer overlay
routing has received much attention lately, there has
been little focus on the {\em `coexistence'\/} and {\em
`interaction'\/} of overlays on top of the same
physical network. In this paper, we show that when each
overlay plays the optimal routing strategy so as to
optimize its own performance, there exists an
equilibrium point for the overall routing strategy.
However, the equilibrium may be {\em inefficient:\/}
(a) it may not be Pareto optimal, (b) some fairness
anomalies of resource allocation may occur. This is
worthy of attention because overlays can be easily
deployed and may not know of each other's existence,
so they may continue to operate at a sub-optimal
point.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ma:2005:CNC,
author = "Richard T. B. Ma and Vishal Misra and Dan Rubenstein",
title = "Cooperative and non-cooperative models for
slotted-{Aloha} type {MAC} protocols",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "30--32",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Aloha [1] and its slotted variation [3] have been
widely deployed as a medium access control (MAC)
protocol for different communication networks.
Slotted-Aloha type MAC protocols do not perform carrier
sensing; instead, they synchronize transmissions into
time-slots. These protocols are suitable for
controlling multiple accesses when nodes cannot sense
each other. Recent development of wireless and sensor
networks urges us to re-investigate slotted-Aloha type
MAC, and to design its variations for these new
trends.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Covell:2005:PMS,
author = "Michele Covell and Sumit Roy and Beomjoo Seo",
title = "Predictive modeling of streaming servers",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "33--35",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe our approach to deriving
saturation models for streaming servers from
vector-labeled training data. If a streaming server is
driven into saturation by accepting too many clients,
the quality of service degrades across the sessions.
The actual saturating load on a streaming server
depends on the detailed characteristics of the client
requests: the content location (local disk or stream
relay), the relative popularity, and the bit and packet
rates [1]. Previous work in streaming-server models has
used carefully selected, low-dimensional measurements,
such as client jitter and rebuffering counts [2], or
server memory usage [3]. In contrast, we collect 30
distinct low-level measures and 210 nonlinear
derivative measures each second. This provides us with
robustness against outliers, without reducing
sensitivity or responsiveness to changes in load. Since
the measurement dimensionality is so high, our approach
requires the modeling and learning framework described
in this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2005:RTP,
author = "Mor Harchol-Balter and Takayuki Osogami and Alan
Scheller-Wolf",
title = "Robustness of threshold policies in beneficiary-donor
model",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "36--38",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101905",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A common problem in multiserver systems is deciding
how to allocate resources among jobs so as to minimize
mean response time. Since good parameter settings
typically depend on environmental conditions such as
system loads, an allocation policy that is optimal in
one environment may provide poor performance when the
environment changes, or when the prediction of the
environment is wrong. We say that such a policy is not
{\em robust.\/} In this paper, we analytically compare
the robustness of several threshold-based allocation
policies, in a dual server beneficiary-donor model. We
introduce two types of robustness: {\em static
robustness}, which measures robustness against
mis-estimation of the true load, and {\em dynamic
robustness}, which measures robustness against
fluctuations in the load. We find that policies
employing multiple thresholds offer significant benefit
over single threshold policies with respect to static
robustness. Yet they surprisingly offer much less
benefit with respect to dynamic robustness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raz:2005:LRU,
author = "David Raz and Benjamin Avi-Itzhak and Hanoch Levy",
title = "Locality of reference and the use of sojourn time
variance for measuring queue unfairness: extended
abstract",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "39--41",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The variance of customer sojourn time (or waiting
time) has been used, either explicitly or implicitly, as
an indication of fairness for as long as queueing theory
has existed. In this work we demonstrate that this quantity
has a disadvantage as a fairness measure, since it is
not local to the busy period in which it is measured.
It therefore may account for customer discrepancies
which are not relevant to fairness of scheduling. We
show that RAQFM, a recently proposed job fairness
measure, does possess such a locality property. We
further show that within a large class of fairness
measures RAQFM is unique in possessing this property.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2005:DSO,
author = "Yingdong Lu and Mark S. Squillante",
title = "Dynamic scheduling to optimize utility functions of
sojourn time moments in queueing systems",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "42--44",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is well known that scheduling the service of
customers according to the shortest remaining
processing time (SRPT) policy is optimal with respect
to minimizing the mean sojourn time of customers.
Recent studies have further argued that SRPT does not
unfairly penalize large customers in order to benefit
small customers, and therefore these studies propose
the use of SRPT to improve performance in various
applications. However, as Schrage and Miller point out
[10], the SRPT policy can raise several difficulties
for a number of important reasons. Such difficulties
can arise from the inability to accurately predict
service times, or the complicated nature of
implementing the preemptive aspect of the SRPT policy
which requires keeping track of the remaining service
times of all waiting customers as well as of the
customer in service. Normally, preemption also incurs
additional costs, and thus one might want to avoid
preemption of customers in service whose remaining
service time is not much larger than that of a new
arrival.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papagiannaki:2005:GEF,
author = "Konstantina Papagiannaki and Yin Zhang",
title = "{Guest Editor}'s foreword",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "2--2",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111576",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, large scale network inference has
attracted significant interest within the research
community. On one front, considerable progress has been
made on traffic matrix estimation. Solutions have been
proposed to estimate the amount of traffic flowing
between any pair of ingress and egress points within an
IP network simply based on the total amount of traffic
recorded over IP links. On another front, efforts are
being made to detect the state of the network from end
to end measurements using inference techniques or to
infer the traffic workload by exploiting application
behavior. In essence, the full instrumentation of the
state of an IP network is still considered a cost
prohibitive task and inference may be the only tool we
have to understand the behavior of such large scale
systems. The potential benefits of the proposed
estimation techniques can be great. Accurate
measurement of an IP traffic matrix is essential for
network design and planning. Moreover, accurate
estimation of the network state can facilitate
troubleshooting and performance evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chandramouli:2005:ANC,
author = "Y. Chandramouli and Arnold Neidhardt",
title = "Analysis of network congestion inference techniques",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "3--9",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consider a neutral observer monitoring network
performance based on external network measurements.
Whenever congestion symptoms are observed within the
network, the neutral observer would be interested in
diagnosing the cause of the symptom, and in particular,
identifying the congested link within the network. The
neutral observer may contemplate collecting external
network measurements reflective of network performance
and from those measurements infer link delays to
identify the congested link. Given the measurements
collected, the following result has been obtained in
this article. We prove that it is not possible to
determine one-way link delays based on external network
delay measurements. It is important to note that it is
possible to determine one-way link delays with more
information such as historical data or additional
assumptions about directional delays.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burch:2005:MLD,
author = "Hal Burch and Chris Chase",
title = "Monitoring link delays with one measurement host",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "10--17",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111578",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present RCM, a system to monitor link delays on a
network using a single measurement host. RCM is a
combination of a new measurement system and a new
network tomography technique. The measurement system
employs tunnels to connect to border routers where it
can source and sink measurements across the network.
RCM uses network tomography to calculate the delays
across individual network links from these
measurements. The network tomography technique expands
on previous linear algebra techniques to deal with the
limitations of the resulting data without assuming
either link delay symmetry or a particular topology.
The network tomographic technique is compared against
direct measurements in simulation to ensure accuracy.
RCM is deployed on a large ISP's network to diagnose
the cause of end-to-end delays, from which additional
results are presented. The results are compared against
known behaviors of the network to ensure the results
are consistent with those behaviors. The system is
analyzed for its ability to pin-point the cause of
changes in end-to-end delay.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choi:2005:OCS,
author = "Baek-Young Choi and Supratik Bhattacharyya",
title = "Observations on {Cisco} sampled {NetFlow}",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "18--23",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111579",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic monitoring is an important first step for
network management and traffic engineering. With
high-speed Internet backbone links, efficient and
effective packet sampling is not only desirable, but
also increasingly becoming a necessity. The Sampled
NetFlow [10] is Cisco router's traffic measurement
functionality with static packet sampling for high
speed links. Since the utility of sampling depends on
the {\em accuracy\/} and {\em economy\/} of
measurement, it is important to understand sampling
error and measurement overhead. In this paper, we first
discuss fundamental limitations of sampling techniques
used in the Sampled NetFlow. We assess the accuracy of
the Sampled NetFlow by comparing its output with
complete packet traces [8] from an operational router.
We also show the overheads involved in the Sampled
NetFlow. We find that Sampled NetFlow performs
correctly without incurring dramatic overhead during
our experiments. However, care should be taken in its
use, since the overhead is linearly proportional to the
number of flow records.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Soule:2005:TMT,
author = "Augustin Soule and Kav{\'e} Salamatian and Antonio
Nucci and Nina Taft",
title = "Traffic matrix tracking using {Kalman} filters",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "24--31",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111580",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we develop a new approach to monitoring
origin-destination flows in a large network. We start
by building a state space model for OD flows that is
rich enough to fully capture temporal and spatial
correlations. We apply a Kalman filter to our linear
dynamic system that can be used for both estimation and
prediction of traffic matrices. We call our system a
traffic matrix tracker due to its lightweight mechanism
for temporal updates that enables tracking traffic
matrix dynamics at small time scales. Our Kalman filter
approach allows us to go beyond traffic matrix
estimation in that our single system can also carry out
traffic prediction and yield confidence bounds on the
estimates, the predictions and the residual error
processes. We show that these elements provide key
functionalities needed by monitoring systems of the
future for carrying out anomaly detection. Using real
data collected from a Tier-1 ISP, we validate our
model, illustrate that it can achieve low errors, and
that our method is adaptive on both short and long
timescales.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lance:2005:RTT,
author = "Ryan Lance and Ian Frommer",
title = "Round-trip time inference via passive monitoring",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "32--38",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111581",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The round-trip time and congestion window are the most
important rate-controlling variables in TCP. We present
a novel method for estimating these variables from
passive traffic measurements. The method uses four
different techniques to infer the minimum round-trip
time based on the pacing of a limited number of packets.
We then estimate the sequence of congestion windows and
round-trip times for the whole flow. We validate our
algorithms with the ns2 network simulator.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lawrence:2005:LAN,
author = "Earl Lawrence and George Michailidis and Vijay N.
Nair",
title = "Local area network analysis using end-to-end delay
tomography",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "39--45",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111582",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has been considerable interest over the last few
years in collecting and analyzing Internet traffic data
in order to estimate quality of service parameters such
as packet loss rates and delay distributions. In this
paper, we focus on fast and efficient estimation
methods for network link delay distributions based on
end-to-end measurements obtained by probing the
underlying network. We introduce a rigorous statistical
framework for designing the necessary probing
experiments and examine the properties of the proposed
estimators. The proposed framework and the resulting
methodology are validated using data collected on the
University of North Carolina (UNC) local area
network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tian:2005:TAL,
author = "Wenhong Tian",
title = "The transient analysis of loss networks",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "46--50",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Unlike stationary behavior, time-dependent blocking
probabilities for loss networks are not well understood
and little work has been done except for the single
service center case. We propose novel closed-form
transient analysis methods for a single Erlang loss
system and for networks; to the best of our knowledge,
these are the most efficient ways to analyze the
transient behavior of Erlang loss systems and networks.
Applying this model, time-dependent provisioning can
satisfy dynamically changing traffic demands and avoid
the overprovisioning problem in connection-oriented loss
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fiedler:2005:TMT,
author = "Daniel Fiedler and Kristen Walcott and Thomas
Richardson and Gregory M. Kapfhammer and Ahmed Amer and
Panos K. Chrysanthis",
title = "Towards the measurement of tuple space performance",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "51--62",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many applications rely upon a tuple space within
distributed system middleware to provide loosely
coupled communication and service coordination. This
paper describes an approach for measuring the
throughput and response time of a tuple space when it
handles concurrent local space interactions.
Furthermore, it discusses a technique that populates a
tuple space with tuples before the execution of a
benchmark in order to age the tuple space and provide a
worst-case measurement of space performance. We apply
the tuple space benchmarking and aging methods to the
measurement of the performance of a JavaSpace, a
current example of a tuple space that integrates with
the Jini network technology. The experimental results
indicate that: (i) the JavaSpace exhibits limited
scalability as the number of concurrent interactions
from local space clients increases, (ii) the aging
technique can operate with acceptable time overhead,
and (iii) the aging technique does ensure that the
results from benchmarking capture the worst-case
performance of a tuple space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riska:2006:GEF,
author = "Alma Riska and Erik Riedel",
title = "{Guest Editor}'s foreword: bigger and faster and
smaller",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "2--3",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years storage systems have evolved
dramatically as a result of both social and technical
advances. Storage systems and storage devices are found
in almost any computing installation from large
centralized and distributed enterprise systems to a
variety of mobile consumer electronic devices. Such a
wide deployment of storage has created a need to
re-evaluate basic solutions in storage systems design
and implementation. As part of this ongoing process of
technology evolution, it is critical to find a
framework to identify, understand, and evaluate a range
of issues. The reliability, availability, scalability,
performance, and power consumption characteristics of
storage systems must be considered in a variety of
traditional and emerging computing environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keeton:2006:CMD,
author = "Kimberly Keeton and Arif Merchant",
title = "Challenges in managing dependable data systems",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "4--10",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138089",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent work shows how to automatically design storage
systems that meet performance and dependability
requirements by appropriately selecting and configuring
storage devices, and creating snapshot, remote mirror,
and traditional backup copies. Although this work
represents a solid foundation, users demand an even
higher level of functionality: the ability to
cost-effectively manage data according to
application-centric (or better, business
process-centric) performance, dependability and
manageability requirements, as these requirements
evolve over the data's lifetime. In this paper, we
outline several research challenges in managing
dependable data systems, including capturing users'
high-level goals; translating them into storage-level
requirements; and designing, deploying, and analyzing
the resulting data systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2006:ACT,
author = "Jianyong Zhang and Prasenjit Sarkar and Anand
Sivasubramaniam",
title = "Achieving completion time guarantees in an
opportunistic data migration scheme",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "11--16",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138090",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today's data centers are in a constant state of
evolution because of equipment refreshes and the move
to tiered storage. Data migration is a very important
activity in this environment as data moves from one
storage device to another without disrupting access
from applications. This paper presents the design,
implementation, and evaluation of a migration scheme
that provides completion time guarantees for a
migration task and also minimizes its impact on
foreground applications. This scheme is based on an
opportunistic data migration scheme that considers
migration as a background activity. To make sure that a
migration task obeys a completion time constraint, an
adaptive rate control mechanism is presented. The
scheme uses various statistical techniques to estimate
system capacities, and utilizes these estimates to
regulate foreground activities. Trace-driven
experimental evaluation shows that our migration scheme
is able to ensure that the migration task completes in
time while minimizing the impact on foreground
application activity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:2006:MLR,
author = "Alexander Thomasian",
title = "Multi-level {RAID} for very large disk arrays",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "17--22",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138091",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Very Large Disk Arrays --- VLDAs have been developed
to cope with the rapid increase in the volume of data
generated requiring ultrareliable storage. Bricks or
Storage Nodes --- SNs holding a dozen or more disks are
cost effective VLDA building blocks, since they cost
less than traditional disk arrays. We utilize the
Multilevel RAID --- MRAID paradigm for protecting both
SNs and their disks. Each SN is a
$k$-disk-failure-tolerant (kDFT) array, while the
replication or $l$-node-failure-tolerance ($l$NFT) paradigm is
applied at the SN level. For example, RAID1(M)/5(N)
denotes a RAID1 at the higher level with a degree of
replication $M$ and each virtual disk is an SN
configured as a RAID5 with $N$ physical disks. We
provide the data layout for RAID5/5 and RAID6/5 MRAIDs
and give examples of updating data and recovering lost
data. The former requires {\em storage transactions\/}
to ensure the atomicity of storage updates. We discuss
some weaknesses in reliability modeling in RAID5 and
give examples of an asymptotic expansion method to
compare the reliability of several MRAID organizations.
We outline the reliability analysis of Markov chain
models of VLDAs and briefly report on conclusions from
simulation results. In Conclusions we outline areas for
further research.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mesnier:2006:RFM,
author = "Michael Mesnier and Matthew Wachs and Brandon Salmon
and Gregory R. Ganger",
title = "Relative fitness models for storage",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "23--28",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138092",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Relative fitness is a new black-box approach to
modeling storage devices. Whereas conventional
black-box models train to predict a device's
performance given `device-independent' workload
characteristics, relative fitness models learn to
predict the {\em changes\/} in performance between
specific devices. There are two advantages. First,
unlike conventional modeling, relative fitness does not
depend entirely on workload characteristics;
performance and resource utilization (e.g., cache
usage) can also be used to describe a workload. This is
beneficial when workload characteristics are difficult
to express (e.g., temporal locality). Second, because
relative fitness models are constructed for each pair
of devices, changes in workload characteristics (e.g.,
I/O inter-arrival delay) can be modeled. Therefore,
unlike a conventional model, a relative fitness model
can be used by applications with a {\em closed\/} I/O
arrival process. In this article, we present relative
fitness as an evolution of the conventional model and
share some early results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arpaci-Dusseau:2006:SSD,
author = "Andrea C. Arpaci-Dusseau and Remzi H. Arpaci-Dusseau
and Lakshmi N. Bairavasundaram and Timothy E. Denehy
and Florentina I. Popovici and Vijayan Prabhakaran and
Muthian Sivathanu",
title = "Semantically-smart disk systems: past, present, and
future",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "29--35",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138093",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe research that has been
on-going within our group for the past four years on
{\em semantically-smart disk systems}. A
semantically-smart system goes beyond typical
block-based storage systems by extracting higher-level
information from the stream of traffic to disk; doing
so enables new and interesting pieces of functionality
to be implemented within low-level storage systems. We
first describe the development of our efforts over the
past four years, highlighting the key technologies
needed to build semantically-smart systems as well as
the main weaknesses of our approach. We then discuss
future directions in the design and implementation of
smarter storage systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2006:BDS,
author = "Eitan Bachmat and Vladimir Braverman",
title = "Batched disk scheduling with delays",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "36--41",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138094",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the important performance enhancing
capabilities of modern disk drives, is the ability to
permute the order of service of incoming I/O requests
in order to minimize total access time. Given a batch
(set) of I/O requests, the problem of finding the
optimal order of service is known as the {\em Batched
Disk Scheduling Problem\/} (BDSP). BDSP is a well known
instance of the Asymmetric Traveling Salesman Problem
(ATSP); in fact, it has been used as one of a few
principal test cases for the examination of heuristic
algorithms for the ATSP, [4], [12]. To specify an
instance of BDSP amounts to a choice of a model for the
mechanical motion of the disk and a choice of locations
and lengths of the requested I/O in the batch. The
distance between requests is the amount of time needed
by the disk to move from the end of one request to the
beginning of the other; thus the amount of time needed
to read the data itself, the {\em Transfer time}, is not
counted, since it is independent of the order of the
requests; only the order-dependent {\em Access time\/}
is computed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zarandioon:2006:OOD,
author = "Saman Zarandioon and Alexander Thomasian",
title = "Optimization of online disk scheduling algorithms",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "42--46",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138086",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Disk scheduling policies have a significant effect on
disk performance. SPTF is one of the well-known
policies that can raise disk performance close to
optimality [1]. One of the drawbacks of the regular
implementation of SPTF is its high computational cost.
`The computational cost [of SPTF] (as indicated crudely
by our simulation times) is very high' [2]. This paper
shows that the computational cost of SPTF is not
inherent to the policy but is a matter of
implementation. Experience shows that this approach
can improve efficiency by over 80\% compared to a
na{\"\i}ve implementation. Finally, an algorithm for
efficient implementation of lookahead algorithms is
introduced.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk scheduling; online scheduling; shortest
positioning time first; SPTF",
}
@Article{Reed:2006:PRU,
author = "Daniel A. Reed",
title = "Performance and reliability: the ubiquitous
challenge",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "1--2",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140278",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Legend says that Archimedes remarked, on the discovery
of the lever, `Give me a place to stand and I can move
the world.' Today, computing pervades all aspects of
society. `Science' and `computational science' have
become largely synonymous, and computing is the
intellectual lever that opens the pathway to discovery
in diverse domains. As new discoveries increasingly lie
at the interstices of traditional disciplines,
computing is also the enabler for scholarship in the
arts, humanities, creative practice and public policy.
Equally importantly, computing supports our critical
infrastructure, from monetary and communication systems
to the electric power grid. With such pervasive
dependence, computing system reliability and
performance are ever more critical. Although the mean
time before failure (MTBF) of commodity hardware
components (i.e., processors, disks, memories, power
supplies and networks) is high, their use in large,
mission critical systems can still lead to systemic
failures. Our thesis is that the `two worlds' of
software --- distributed systems and
sequential/parallel systems --- must meet, embodying
ideas from each, if we are to build resilient systems.
This talk surveys some of these challenges and presents
possible approaches for resilient design, ranging from
intelligent hardware monitoring and adaptation, through
low-overhead recovery schemes, statistical sampling and
differential scheduling and to alternative models of
system software, including evolutionary adaptation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thereska:2006:STA,
author = "Eno Thereska and Brandon Salmon and John Strunk and
Matthew Wachs and Michael Abd-El-Malek and Julio Lopez
and Gregory R. Ganger",
title = "{Stardust}: tracking activity in a distributed storage
system",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "3--14",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140280",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance monitoring in most distributed systems
provides minimal guidance for tuning, problem
diagnosis, and decision making. Stardust is a
monitoring infrastructure that replaces traditional
performance counters with end-to-end traces of requests
and allows for efficient querying of performance
metrics. Such traces better inform key administrative
performance challenges by enabling, for example,
extraction of per-workload, per-resource demand
information and per-workload latency graphs. This paper
reports on our experience building and using end-to-end
tracing as an on-line monitoring tool in a distributed
storage system. Using diverse system workloads and
scenarios, we show that such fine-grained tracing can
be made efficient (less than 6\% overhead) and is
useful for on- and off-line analysis of system
behavior. These experiences make a case for having
other systems incorporate such an instrumentation
framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "end-to-end tracing; request causal chain; Ursa Minor",
}
@Article{Pinheiro:2006:ERC,
author = "Eduardo Pinheiro and Ricardo Bianchini and Cezary
Dubnicki",
title = "Exploiting redundancy to conserve energy in storage
systems",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "15--26",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140281",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper makes two main contributions. First, it
introduces Diverted Accesses, a technique that
leverages the redundancy in storage systems to conserve
disk energy. Second, it evaluates the previous
(redundancy-oblivious) energy conservation techniques,
along with Diverted Accesses, as a function of the
amount and type of redundancy in the system. The
evaluation is based on novel analytic models of the
energy consumed by the techniques. Using these energy
models and previous models of reliability,
availability, and performance, we can determine the
best redundancy configuration for new energy-aware
storage systems. To study Diverted Accesses for
realistic systems and workloads, we simulate a
wide-area storage system under two file-access traces.
Our modeling results show that Diverted Accesses is
more effective and robust than the redundancy-oblivious
techniques. Our simulation results show that our
technique can conserve 20-61\% of the disk energy
consumed by the wide-area storage system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk energy; energy management; energy modeling",
}
@Article{Modiano:2006:MTW,
author = "Eytan Modiano and Devavrat Shah and Gil Zussman",
title = "Maximizing throughput in wireless networks via
gossiping",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "27--38",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140283",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A major challenge in the design of wireless networks
is the need for distributed scheduling algorithms that
will efficiently share the common spectrum. Recently, a
few distributed algorithms for networks in which a node
can converse with at most a single neighbor at a time
have been presented. These algorithms guarantee 50\% of
the maximum possible throughput. We present the {\em
first distributed scheduling framework that guarantees
maximum throughput}. It is based on a combination of a
distributed matching algorithm and an algorithm that
compares and merges successive matching solutions. The
comparison can be done by a deterministic algorithm or
by randomized gossip algorithms. In the latter case,
the comparison may be inaccurate. Yet, we show that if
the matching and gossip algorithms satisfy simple
conditions related to their performance and to the
inaccuracy of the comparison (respectively), the
framework attains the desired throughput. It is shown
that the complexities of our algorithms, which achieve
nearly 100\% throughput, are comparable to those of the
algorithms that achieve 50\% throughput. Finally, we
discuss extensions to general interference models. Even
for such models, the framework provides a simple
distributed throughput optimal algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed algorithms; gossip algorithms; matching;
scheduling; stability; wireless networks",
}
@Article{Gao:2006:DEE,
author = "Yan Gao and Dah-Ming Chiu and John C. S. Lui",
title = "Determining the end-to-end throughput capacity in
multi-hop networks: methodology and applications",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "39--50",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140284",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present a methodology to
analytically compute the {\em throughput capacity}, or
the maximum end-to-end throughput of a given source and
destination pair in a multi-hop wireless network. The
end-to-end throughput capacity is computed by
considering the interference due to neighboring nodes,
as well as various modes of hidden node interference.
Knowing the throughput capacity is important because it
facilitates the design of routing policy, admission
control for realtime traffic, as well as load control
for wireless networks. We model location-dependent
neighboring interference and we use a contention graph
to represent these interference relationships. Based on
the contention graph, we formulate the individual link
capacity as a set of fixed point equations. The
end-to-end throughput capacity can then be determined
once these link capacities are obtained. To illustrate
the utility of our proposed methodology, we present two
important applications: (a) {\em route optimization\/}
to determine the path with the maximum end-to-end
throughput capacity and, (b) {\em optimal offered load
control\/} for a given path so that the maximum
end-to-end capacity can be achieved. Extensive
simulations are carried out to verify and validate the
proposed analytical methodology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical model for 802.11 protocols; multi-hop ad
hoc wireless networks; throughput capacity",
}
@Article{Koksal:2006:ICV,
author = "Can Emre Koksal and Kyle Jamieson and Emre Telatar and
Patrick Thiran",
title = "Impacts of channel variability on link-level
throughput in wireless networks",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "51--62",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140285",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study analytically and experimentally the
throughput of the packetized time-varying discrete
erasure channel with feedback, which closely captures
the behavior of many practical physical layers. We
observe that the channel variability at different time
scales affects the link-level throughput positively or
negatively depending on its time scale. We show that
the increased variability in the channel at a time
scale smaller than a single packet increases the
link-level throughput, whereas the variability at a
time scale longer than a single packet reduces it. We
express the throughput as a function of the number of
transmissions per packet and evaluate it in terms of
the cumulants of the samples of the stochastic
processes, which model the channel. We also illustrate
our results experimentally using mote radios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel modelling; channel variability; link
estimation",
}
@Article{Mishra:2006:POC,
author = "Arunesh Mishra and Vivek Shrivastava and Suman
Banerjee and William Arbaugh",
title = "Partially overlapped channels not considered harmful",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "63--74",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140286",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many wireless channels in different technologies are
known to have partial overlap. However, due to the
interference effects among such partially overlapped
channels, their simultaneous use has typically been
avoided. In this paper, we present a first attempt to
model partial overlap between channels in a systematic
manner. Through the model, we illustrate that the use
of partially overlapped channels is not always harmful.
In fact, a careful use of some partially overlapped
channels can often lead to significant improvements in
spectrum utilization and application performance. We
demonstrate this through analysis as well as through
detailed application-level and MAC-level measurements.
Additionally, we illustrate the benefits of our
developed model by using it to directly enhance the
performance of two previously proposed channel
assignment algorithms --- one in the context of
wireless LANs and the other in the context of multi-hop
wireless mesh networks. Through detailed simulations,
we show that use of partially overlapped channels in
both these cases can improve end-to-end application
throughput by factors between 1.6 and 2.7 in different
scenarios, depending on wireless node density. We
conclude by observing that the notion of partial
overlap can be the right model of flexibility to design
efficient channel access mechanisms in the emerging
software radio platforms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel assignment; IEEE 802.11; partially overlapped
channels",
}
@Article{Lieshout:2006:GSS,
author = "P. Lieshout and M. Mandjes and S. Borst",
title = "{GPS} scheduling: selection of optimal weights and
comparison with strict priorities",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "75--86",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140288",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a system with two service classes with
heterogeneous traffic characteristics and
Quality-of-Service requirements. The available
bandwidth is shared between the two traffic classes in
accordance with the Generalized Processor Sharing (GPS)
discipline. GPS-based scheduling algorithms, such as
Weighted Fair Queueing, provide a popular mechanism for
service differentiation among heterogeneous traffic
classes. While the performance of GPS for given weights
has been thoroughly examined, the problem of selecting
weight values that maximize the traffic-carrying
capacity has only received limited attention so far.
In the present paper, we address the latter problem for
the case of general Gaussian traffic sources. Gaussian
models cover a wide variety of both long-range
dependent and short-range dependent processes, and are
especially suitable at relatively high levels of
aggregation. In particular, we determine the realizable
region, i.e., the combinations of traffic sources that
can be supported for given Quality-of-Service
requirements in terms of loss and delay metrics. The
results yield the remarkable observation that simple
priority scheduling strategies achieve nearly the full
realizable region.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admissible region; Gaussian traffic; generalized
processor sharing; loss probabilities; priority
scheduling; weight setting",
}
@Article{Gromoll:2006:IRP,
author = "H. Christian Gromoll and Philippe Robert and Bert
Zwart and Richard Bakker",
title = "The impact of reneging in processor sharing queues",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "87--96",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140289",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate an overloaded processor sharing queue
with renewal arrivals and generally distributed service
times. Impatient customers may abandon the queue, or
renege, before completing service. The random time
representing a customer's patience has a general
distribution and may be dependent on his initial
service time requirement. We propose a scaling
procedure that gives rise to a fluid model, with
nontrivial yet tractable steady state behavior. This
fluid model captures many essential features of the
underlying stochastic model, and we use it to analyze
the impact of impatience in processor sharing queues.
We show that this impact can be substantial compared
with FCFS, and we propose a simple admission control
policy to overcome these negative impacts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; delay-differential equation; fluid
limits; measure valued process; processor sharing;
queues in overload; queues with impatience; user
behavior",
}
@Article{Yang:2006:TAP,
author = "Chang-Woo Yang and Adam Wierman and Sanjay Shakkottai
and Mor Harchol-Balter",
title = "Tail asymptotics for policies favoring short jobs in a
many-flows regime",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "97--108",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140290",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling policies that prioritize short jobs have
received growing attention in recent years. The class
of SMART policies includes many such disciplines, e.g.
Shortest-Remaining-Processing-Time (SRPT) and
Preemptive-Shortest-Job-First (PSJF). In this work, we
study the delay distribution of SMART policies and
contrast this distribution with that of the
Least-Attained-Service (LAS) policy, which indirectly
favors short jobs by prioritizing jobs with the least
attained service (age). We study the delay distribution
(rate function) of LAS and the SMART class in a
discrete-time queueing system under the many sources
regime. Our analysis in this regime (large capacity and
large number of flows) hinges on a novel two
dimensional queue representation, which creates
tie-break rules. These additional rules do not alter
the policies, but greatly simplify their analysis. We
demonstrate that the queue evolution of all the above
policies can be described under this single two
dimensional framework. We prove that all SMART policies
have the same delay distribution as SRPT and illustrate
the improvements SMART policies make over
First-Come-First-Served (FCFS). Furthermore, we show
that the delay distribution of SMART policies
stochastically improves upon the delay distribution of
LAS. However, the delay distribution under LAS is not
too bad --- the distribution of delay under LAS for
most jobs sizes still provides improvement over FCFS.
Our results are complementary to prior work that
studies delay-tail behavior in the large buffer regime
under a single flow.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "large--deviations; LAS; many--sources; rate function;
scheduling; SMART; SRPT",
}
@Article{Bonald:2006:LHT,
author = "Thomas Bonald and Aleksi Penttinen and Jorma Virtamo",
title = "On light and heavy traffic approximations of balanced
fairness",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "109--120",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140291",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Flow level analysis of communication networks with
multiple shared resources is generally difficult. A
recently introduced sharing scheme called balanced
fairness has brought these systems within the realm of
tractability. While straightforward in principle, the
numerical evaluation of practically interesting
performance metrics like per-flow throughput is
feasible for limited state spaces only, besides some
specific networks where the results are explicit. In
the present paper, we study the behaviour of balanced
fairness in light and heavy traffic regimes and show
how the corresponding performance results can be used
to approximate the flow throughput over the whole load
range. The results apply to any network, with a state
space of arbitrary dimension. A few examples are
explicitly worked out to illustrate the concepts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "balanced fairness; elastic traffic; flow level
analysis; throughput approximation",
}
@Article{Song:2006:NFF,
author = "Han Hee Song and Lili Qiu and Yin Zhang",
title = "{NetQuest}: a flexible framework for large-scale
network measurement",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "121--132",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140293",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present NetQuest, a flexible
framework for large-scale network measurement. We apply
{\em Bayesian experimental design\/} to select active
measurements that maximize the amount of information we
gain about the network path properties subject to given
resource constraints. We then apply {\em network
inference\/} techniques to reconstruct the properties
of interest based on the partial, indirect observations
we get through these measurements. By casting network
measurement in a general Bayesian decision theoretic
framework, we achieve flexibility. Our framework can
support a variety of design requirements, including (i)
differentiated design for providing better resolution
to certain parts of the network, (ii) augmented design
for conducting additional measurements given existing
observations, and (iii) joint design for supporting
multiple users who are interested in different parts of
the network. Our framework is also {\em scalable\/} and
can design measurement experiments that span thousands
of routers and end hosts. We develop a toolkit that
realizes the framework on PlanetLab. We conduct
extensive evaluation using both real traces and
synthetic data. Our results show that the approach can
accurately estimate network-wide and individual path
properties by only monitoring within 2-10\% of paths.
We also demonstrate its effectiveness in providing
differentiated monitoring, supporting continuous
monitoring, and satisfying the requirements of multiple
users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Bayesian experimental design; network inference;
network measurement; network tomography",
}
@Article{Zhao:2006:RTM,
author = "Qi Zhao and Zihui Ge and Jia Wang and Jun Xu",
title = "Robust traffic matrix estimation with imperfect
information: making use of multiple data sources",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "133--144",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140294",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Estimation of traffic matrices, which provide critical
input for network capacity planning and traffic
engineering, has recently been recognized as an
important research problem. Most of the previous
approaches infer traffic matrix from either SNMP link
loads or sampled NetFlow records. In this work, we
design novel inference techniques that, by
statistically correlating SNMP link loads and sampled
NetFlow records, allow for much more accurate
estimation of traffic matrices than obtainable from
either information source alone, even when sampled
NetFlow records are available at only a subset of
ingress. Our techniques are practically important and
useful since both SNMP and NetFlow are now widely
supported by vendors and deployed in most of the
operational IP networks. More importantly, this
research leads us to a new insight that SNMP link loads
and sampled NetFlow records can serve as `error
correction codes' to each other. This insight helps us
to solve a challenging open problem in traffic matrix
estimation, `How to deal with dirty data (SNMP and
NetFlow measurement errors due to
hardware/software/transmission problems)?' We design
techniques that, by comparing notes between the above
two information sources, identify and remove dirty
data, and therefore allow for accurate estimation of
the traffic matrices with the cleaned data. We conducted
experiments on real measurement data obtained from a
large tier-1 ISP backbone network. We show that, when
full deployment of NetFlow is not available, our
algorithm can improve estimation accuracy significantly
even with a small fraction of NetFlow data. More
importantly, we show that dirty data can contaminate a
traffic matrix, and identifying and removing them can
reduce errors in traffic matrix estimation by up to an
order of magnitude. Routing changes are another key
factor that affects estimation accuracy. We show that
by using them as a priori information, the traffic
matrices can be estimated much more accurately than when
routing changes are omitted. To the best of our knowledge, this work
is the first to offer a comprehensive solution which
fully takes advantage of using multiple readily
available data sources. Our results provide valuable
insights on the effectiveness of combining flow
measurement and link load measurement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network measurement; statistical inference; traffic
matrix",
}
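
The Zhao et al. entry above estimates traffic matrices by statistically
combining SNMP link loads with sampled NetFlow records. As a minimal
sketch of the underlying inference step only (not the authors' algorithm),
the following Python fragment solves the link-load tomography system
y = A x by regularized least squares, pulling the solution toward a
NetFlow-derived prior; the routing matrix A, link loads y, and prior x0
are hypothetical.

import numpy as np

# Hypothetical routing matrix: rows = links, columns = origin-destination pairs.
A = np.array([[1, 1, 0],
              [0, 1, 1],
              [1, 0, 1]], dtype=float)
y = np.array([120.0, 90.0, 110.0])      # SNMP link loads (hypothetical)
x0 = np.array([70.0, 40.0, 60.0])       # prior from sampled NetFlow (hypothetical)

# Regularized least squares: stay close to the NetFlow prior while matching link loads.
lam = 0.1
lhs = A.T @ A + lam * np.eye(A.shape[1])
rhs = A.T @ y + lam * x0
x_hat = np.linalg.solve(lhs, rhs)
x_hat = np.clip(x_hat, 0.0, None)        # traffic demands cannot be negative
print("estimated OD demands:", x_hat)
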
@Article{Lall:2006:DSA,
author = "Ashwin Lall and Vyas Sekar and Mitsunori Ogihara and
Jun Xu and Hui Zhang",
title = "Data streaming algorithms for estimating entropy of
network traffic",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "145--156",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140295",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Using entropy of traffic distributions has been shown
to aid a wide variety of network monitoring
applications such as anomaly detection, clustering to
reveal interesting patterns, and traffic
classification. However, realizing this potential
benefit in practice requires accurate algorithms that
can operate on high-speed links, with low CPU and
memory requirements. In this paper, we investigate the
problem of estimating the entropy in a streaming
computation model. We give lower bounds for this
problem, showing that neither approximation nor
randomization alone will let us compute the entropy
efficiently. We present two algorithms for randomly
approximating the entropy in a time and space efficient
manner, applicable for use on very high speed (greater
than OC-48) links. The first algorithm for entropy
estimation is inspired by the structural similarity
with the seminal work of Alon et al. for estimating
frequency moments, and we provide strong theoretical
guarantees on the error and resource usage. Our second
algorithm utilizes the observation that the performance
of the streaming algorithm can be enhanced by
separating the high-frequency items (or elephants) from
the low-frequency items (or mice). We evaluate our
algorithms on traffic traces from different deployment
scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; traffic analysis",
}
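
The Lall et al. entry above approximates the entropy of traffic
distributions under streaming constraints. The exact quantity being
approximated is simply the empirical entropy of the per-flow packet
counts; a minimal non-streaming sketch follows (the flow identifiers and
counts are made up, and this is not the authors' sketch-based estimator).

import math
from collections import Counter

# Hypothetical packet stream keyed by flow identifier.
packets = ["f1", "f2", "f1", "f3", "f1", "f2", "f4", "f1"]

counts = Counter(packets)
total = sum(counts.values())

# Empirical entropy H = -sum p_i log2 p_i over the flow-size distribution.
entropy = -sum((c / total) * math.log2(c / total) for c in counts.values())
print(f"empirical entropy: {entropy:.3f} bits")
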
@Article{Lee:2006:SEE,
author = "Sanghwan Lee and Zhi-Li Zhang and Sambit Sahu and
Debanjan Saha",
title = "On suitability of {Euclidean} embedding of {Internet}
hosts",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "157--168",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140296",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we investigate the suitability of
embedding Internet hosts into a Euclidean space given
their pairwise distances (as measured by round-trip
time). Using the classical scaling and matrix
perturbation theories, we first establish the (sum of
the) magnitude of {\em negative\/} eigenvalues of the
(doubly-centered, squared) distance matrix as a measure
of suitability of Euclidean embedding. We then show
that the distance matrix among Internet hosts contains
negative eigenvalues of {\em large magnitude}, implying
that embedding the Internet hosts in a Euclidean space
would incur relatively large errors. Motivated by
earlier studies, we demonstrate that the inaccuracy of
Euclidean embedding is caused by a large degree of {\em
triangle inequality violation\/} (TIV) in the Internet
distances, which leads to negative eigenvalues of large
magnitude. Moreover, we show that the TIVs are likely
to occur {\em locally}; hence, the distances among
these close-by hosts cannot be estimated accurately
using a {\em global\/} Euclidean embedding. In
addition, increasing the dimension of the embedding does
not reduce the embedding errors. Based on these
insights, we propose a new hybrid model for embedding
the network nodes using only a 2-dimensional Euclidean
coordinate system and small {\em error adjustment
terms}. We show that the accuracy of the proposed
embedding technique is as good as, if not better than,
that of a 7-dimensional Euclidean embedding.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Euclidean embedding; suitability; triangle
inequality",
}
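
The Lee et al. entry above measures suitability of Euclidean embedding by
the magnitude of the negative eigenvalues of the doubly-centered, squared
distance matrix. A minimal classical-scaling sketch of that computation on
a hypothetical RTT matrix (numpy; not the authors' code):

import numpy as np

# Hypothetical symmetric RTT matrix (ms) among four hosts.
D = np.array([[0, 10, 20, 15],
              [10, 0, 25, 30],
              [20, 25, 0, 12],
              [15, 30, 12, 0]], dtype=float)

n = D.shape[0]
J = np.eye(n) - np.ones((n, n)) / n         # centering matrix
B = -0.5 * J @ (D ** 2) @ J                  # doubly-centered, squared distances

eigvals = np.linalg.eigvalsh(B)
neg = eigvals[eigvals < 0]
# A large total negative mass signals poor suitability for Euclidean embedding.
print("negative-eigenvalue mass:", abs(neg).sum(), "of total", abs(eigvals).sum())
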
@Article{Casale:2006:EAE,
author = "Giuliano Casale",
title = "An efficient algorithm for the exact analysis of
multiclass queueing networks with large population
sizes",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "169--180",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140298",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce an efficient algorithm for the exact
analysis of closed multiclass product-form queueing
network models with large population sizes. We adopt a
novel approach, based on linear systems of equations,
which significantly reduces the cost of computing
normalizing constants. With the proposed algorithm, the
analysis of a model with $N$ circulating jobs of
multiple classes requires essentially the solution of
$N$ linear systems with order independent of population
sizes. A distinguishing feature of our approach is that
we can immediately apply theorems, solution techniques,
and decompositions for linear systems to queueing
network analysis. Following this idea, we propose a
block triangular form of the linear system that further
reduces the requirements, in terms of both time and
storage, of an exact analysis. An example illustrates
the efficiency of the resulting algorithm in the presence
of large populations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computational algorithms; exact analysis; multiclass
models; normalizing constant; product-form queueing
networks",
}
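
The Casale entry above solves closed multiclass product-form networks
through linear systems for the normalizing constant. For contrast only,
and not the paper's algorithm, the following sketch runs the textbook
exact Mean Value Analysis recursion for a single-class closed network of
load-independent queueing stations; the service demands and the
population are hypothetical.

# Exact single-class MVA for a closed network of M queueing stations.
demands = [0.05, 0.08, 0.02]   # service demands D_m in seconds (hypothetical)
N = 20                          # circulating jobs (hypothetical)

queue = [0.0] * len(demands)    # mean queue lengths Q_m at population n-1
for n in range(1, N + 1):
    # Residence time at each station: R_m = D_m * (1 + Q_m(n-1)).
    resid = [d * (1.0 + q) for d, q in zip(demands, queue)]
    X = n / sum(resid)          # system throughput at population n
    queue = [X * r for r in resid]

print(f"throughput X({N}) = {X:.2f} jobs/s, mean queue lengths = {queue}")
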
@Article{VanVelthoven:2006:TAT,
author = "J. {Van Velthoven} and B. {Van Houdt} and C. Blondia",
title = "Transient analysis of tree-like processes and its
application to random access systems",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "181--190",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140299",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new methodology to assess transient performance
measures of tree-like processes is proposed by
introducing the concept of tree-like processes with
marked time epochs. As opposed to the standard
tree-like process, such a process marks part of the
time epochs by following a set of Markovian rules. Our
interest lies in obtaining the system state at the
$n$-th marked time epoch as well as the mean time at
which this $n$-th marking occurs. The methodology
transforms the transient problem into a stationary one
by applying a discrete Erlangization and constructing a
reset Markov chain. A fast algorithm, with limited
memory usage, that exploits the block structure of the
reset Markov chain is developed and is based, among
others, on Sylvester matrix equations and fast Fourier
transforms. The theory of tree-like processes
generalizes the well-known paradigm of
Quasi-Birth-Death Markov chains and has various
applications. We demonstrate our approach on the
celebrated Capetanakis--Tsybakov--Mikhailov (CTM)
random access protocol yielding new insights on its
initial behavior both in normal and overload
conditions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "contention resolution; Matrix analytic methods; random
access algorithms; transient analysis; tree-like
processes",
}
@Article{Buchholz:2006:BSR,
author = "Peter Buchholz",
title = "Bounding stationary results of {Tandem} networks with
{MAP} input and {PH} service time distributions",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "191--202",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140300",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we propose a new approach to compute
bounds on stationary measures of queueing systems with
an input process described by a Markovian Arrival
Process (MAP) and a sequence of stations with Phase
Type (PH) service time distributions. Such queueing
systems cannot be solved exactly since they have an
infinite state space in several natural dimensions.
Based on earlier work on the computation of bounds for
specific classes of infinite Markov chains, the paper
presents a new approach specifically tailored to the
analysis of the mentioned class of queueing networks.
By increasing the size of the state space of the
aggregated Markov chain to be solved for bound
computation, bounds can be made arbitrarily tight, but
practical limits come up due to the computational
complexity. However, we show by means of several
examples that tight bounds can be derived with low
effort for a large set of queueing systems in the
mentioned class.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bounds; Markov chains; stationary analysis; Tandem
queues",
}
@Article{Gupta:2006:FCQ,
author = "Varun Gupta and Mor Harchol-Balter and Alan Scheller
Wolf and Uri Yechiali",
title = "Fundamental characteristics of queues with fluctuating
load",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "203--215",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140301",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Systems whose arrival or service rates fluctuate over
time are very common, but are still not well understood
analytically. Stationary formulas are poor predictors
of systems with fluctuating load. When the arrival and
service processes fluctuate in a Markovian manner,
computational methods, such as Matrix-analytic and
spectral analysis, have been instrumental in the
numerical evaluation of quantities like mean response
time. However, such computational tools provide only
limited insight into the {\em functional behavior\/} of
the system with respect to its primitive input
parameters: the arrival rates, service rates, and rate
of fluctuation. For example, the shape of the function
that maps rate of fluctuation to mean response time is
not well understood, even for an M/M/1 system. Is this
function increasing, decreasing, monotonic? How is its
shape affected by the primitive input parameters? Is
there a simple closed-form approximation for the shape
of this curve? Turning to user experience: How is the
performance experienced by a user arriving into a `high
load' period different from that of a user arriving
into a `low load' period, or that of a random user? Are
there stochastic relations between these? In this
paper, we provide the first answers to these
fundamental questions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluctuating load; MAP; MMPP; non-stationary
arrivals/service; Ross's conjecture; stochastic
ordering",
}
@Article{Narayanasamy:2006:ALO,
author = "Satish Narayanasamy and Cristiano Pereira and Harish
Patil and Robert Cohn and Brad Calder",
title = "Automatic logging of operating system effects to guide
application-level architecture simulation",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "216--227",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140303",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern architecture research relies heavily on
application-level detailed pipeline simulation. A time
consuming part of building a simulator is correctly
emulating the operating system effects, which is
required even if the goal is to simulate just the
application code, in order to achieve functional
correctness of the application's execution. Existing
application-level simulators require manually hand
coding the emulation of each and every possible system
effect (e.g., system call, interrupt, DMA transfer)
that can impact the application's execution. Developing
such an emulator for a given operating system is a
tedious exercise, and it can also be costly to maintain
it to support newer versions of that operating system.
Furthermore, porting the emulator to a completely
different operating system might involve building it
all together from scratch. In this paper, we describe a
tool that can automatically log operating system
effects to guide architecture simulation of application
code. The benefits of our approach are: (a) we do not
have to build or maintain any infrastructure for
emulating the operating system effects, (b) we can
support simulation of more complex applications on our
application-level simulator, including those
applications that use asynchronous interrupts, DMA
transfers, etc., and (c) using the system effects logs
collected by our tool, we can deterministically
re-execute the application to guide architecture
simulation that has reproducible results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "architecture simulation; checkpoints; emulating system
calls",
}
@Article{Guo:2006:AMC,
author = "Fei Guo and Yan Solihin",
title = "An analytical model for cache replacement policy
performance",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "228--239",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140304",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the increasing gap between CPU and memory
speed, cache performance plays an increasingly critical
role in determining the overall performance of
microprocessor systems. One of the important factors
that affect cache performance is the cache
replacement policy. Despite the importance, current
analytical cache performance models ignore the impact
of cache replacement policies on cache performance. To
the best of our knowledge, this paper is the first to
propose an analytical model which predicts the
performance of cache replacement policies. The input to
our model is a simple circular sequence profiling of
each application, which requires very little storage
overhead. The output of the model is the predicted miss
rates of an application under different replacement
policies. The model is based on probability theory and
utilizes Markov processes to compute each cache access'
miss probability. The model makes realistic assumptions
and relies solely on the statistical properties of the
application, without resorting to heuristics or rules of
thumb. The model's run time is less than 0.1 seconds,
much lower than that of trace simulations. We validate
the model by comparing the predicted miss rates of
seventeen Spec2000 and NAS benchmark applications
against miss rates obtained by detailed
execution-driven simulations, across a range of
different cache sizes, associativities, and four
replacement policies, and show that the model is very
accurate. The model's average prediction error is
1.41\%, and there are only 14 out of 952 validation
points in which the prediction errors are larger than
10\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical model; cache performance; replacement
policy",
}
@Article{Olshefski:2006:UMC,
author = "David Olshefski and Jason Nieh",
title = "Understanding the management of client perceived
response time",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "240--251",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140305",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Understanding and managing the response time of web
services is of key importance as dependence on the
World Wide Web continues to grow. We present {\em
Remote Latency-based Management\/} (RLM), a novel
server-side approach for managing pageview response
times as perceived by remote clients, in real-time. RLM
passively monitors server-side network traffic,
accurately tracks the progress of page downloads and
their response times in real-time, and dynamically
adapts connection setup behavior and web page content
as needed to meet response time goals. To manage client
perceived pageview response times, RLM builds a novel
event node model to guide the use of several techniques
for manipulating the packet traffic in and out of a web
server complex, including fast SYN and SYN/ACK
retransmission, and embedded object removal and
rewrite. RLM operates as a stand-alone appliance that
simply sits in front of a web server complex, without
any changes to existing web clients, servers, or
applications. We have implemented RLM on an
inexpensive, commodity, Linux-based PC and present
experimental results that demonstrate its effectiveness
in managing client perceived pageview response times on
transactional e-commerce web workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; client perceived response time;
QoS; web server performance",
}
@Article{Thorup:2006:CIP,
author = "Mikkel Thorup",
title = "Confidence intervals for priority sampling",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "252--263",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140307",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With a priority sample from a set of weighted items,
we can provide an unbiased estimate of the total weight
of any subset. The strength of priority sampling is
that it gives the best possible estimate variance on
any set of input weights. For a concrete subset,
however, the variance on the estimate of its weight
depends strongly on the total set of weights and the
distribution of the subset in this set. The variance
is, for example, much smaller if weights are heavy
tailed. In this paper we show how to generate a
confidence interval directly from a priority sample,
thus complementing the weight estimates with concrete
lower and upper bounds. In particular, we show how heavy
a subset can be and still be likely to stay hidden when
the priority estimate for that subset is zero. Our confidence
intervals for priority sampling are evaluated on real
and synthetic data and compared with confidence
intervals obtained with uniform sampling, weighted
sampling with replacement, and threshold sampling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "confidence intervals; sampling",
}
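
The Thorup entry above derives confidence intervals for priority
sampling. The estimator that the intervals complement is short; a minimal
sketch of drawing a priority sample of size k and estimating a subset's
total weight (the weights are synthetic, and the confidence-interval
construction itself is not reproduced):

import random

def priority_sample(weights, k):
    """Return (sample, threshold): sample maps item -> weight, threshold is the
    (k+1)-th largest priority, with priority q_i = w_i / u_i, u_i ~ U(0,1]."""
    prios = {i: w / random.uniform(1e-12, 1.0) for i, w in weights.items()}
    order = sorted(prios, key=prios.get, reverse=True)
    threshold = prios[order[k]] if len(order) > k else 0.0
    return {i: weights[i] for i in order[:k]}, threshold

def estimate_subset(sample, threshold, subset):
    # Each sampled item contributes max(w_i, threshold); unsampled items contribute 0.
    return sum(max(w, threshold) for i, w in sample.items() if i in subset)

weights = {f"flow{i}": random.expovariate(1.0) for i in range(1000)}  # hypothetical
sample, tau = priority_sample(weights, k=50)
subset = {f"flow{i}" for i in range(100)}
print("estimate:", estimate_subset(sample, tau, subset),
      "true:", sum(weights[i] for i in subset))
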
@Article{Osogami:2006:FPBa,
author = "Takayuki Osogami and Toshinari Itoko",
title = "Finding probably better system configurations
quickly",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "264--275",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140308",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of computer and communication systems
can in theory be optimized by iteratively finding
better system configurations. However, a bottleneck is
the time required in simulations/experiments for
finding a better system configuration in each
iteration. We propose algorithms that quickly find a
system configuration that is probably better than the
`standard' system configuration, where the performance
of a given system configuration is estimated via
simulations or experiments. We prove that our
algorithms make correct decisions with high
probability, and we propose various heuristics to reduce
the total simulation time. Numerical experiments
show the effectiveness of the proposed algorithms, and
this leads to several guidelines for designing
efficient and reliable optimization procedures for the
performance of computer and communication systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "local search; performance optimization; ranking and
selection; screening; simulation",
}
@Article{Bonald:2006:EMN,
author = "Thomas Bonald",
title = "The {Erlang} model with non-{Poisson} call arrivals",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "276--286",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140309",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Erlang formula is known to be insensitive to the
holding time distribution beyond the mean. While calls
are generally assumed to arrive as a Poisson process,
we prove that it is in fact sufficient that users
generate {\em sessions\/} according to a Poisson
process, each session being composed of a random,
finite number of calls and idle periods. A key role is
played by the retrial behavior in case of call
blocking. We illustrate the results by a number of
examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Erlang formula; insensitivity; loss networks",
}
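
The Bonald entry above shows that the Erlang formula remains valid when
calls arrive within Poisson sessions rather than as a Poisson process.
For reference, a minimal sketch of the Erlang B blocking probability
itself, using the standard numerically stable recursion (offered load and
trunk count are hypothetical):

def erlang_b(load, servers):
    """Blocking probability of an M/G/c/c loss system with offered load (Erlangs)."""
    b = 1.0
    for c in range(1, servers + 1):
        b = load * b / (c + load * b)   # recursion B(c) = a*B(c-1) / (c + a*B(c-1))
    return b

print(f"B(load=15 Erl, 20 trunks) = {erlang_b(15.0, 20):.4f}")
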
@Article{Fidler:2006:WDS,
author = "Markus Fidler and Jens B. Schmitt",
title = "On the way to a distributed systems calculus: an
end-to-end network calculus with data scaling",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "287--298",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140310",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network calculus is a min-plus system theory which
facilitates the efficient derivation of performance
bounds for networks of queues. It has successfully been
applied to provide end-to-end quality of service
guarantees for integrated and differentiated services
networks. Yet, a true end-to-end analysis including the
various components of end systems as well as taking
into account mid-boxes like firewalls, proxies, or
media gateways has not been accomplished so far. The
particular challenge posed by such systems is
transformation processes, like data processing,
compression, encoding, and decoding, which may alter
data arrivals drastically. Heterogeneity in the
granularity of operation complicates the analysis
further; for example, multimedia applications process
video frames, which are in turn represented as packets
in the network. To this end, this paper develops a
concise network calculus with scaling
functions, which allow modelling a wide variety of
transformation processes. Combined with the concept of
a packetizer, this theory enables a true end-to-end
analysis of distributed systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network calculus; packetizers; scaling functions",
}
@Article{Peserico:2006:RNC,
author = "Enoch Peserico and Larry Rudolph",
title = "Robust network connectivity: when it's the big picture
that matters",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "299--310",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140312",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work analyzes the connectivity of large diameter
networks where every link has an independent
probability p of failure. We give a (relatively simple)
topological condition that guarantees good connectivity
between regions of such a network. Good connectivity
means that the regions are connected by nearly as many
disjoint, fault-free paths as there are when the entire
network is fault-free. The topological condition is
satisfied in many cases of practical interest, even
when two regions are at a distance much larger than the
expected `distance between faults', 1/p. We extend this
result to networks with failures on nodes, as well as
geometric radio networks with random distribution of
nodes in a deployment area of a given topography. A
rigorous formalization of the intuitive notion of
`hole' in a (not necessarily planar) graph is at the
heart of our result and our proof. Holes, in the
presence of faults, degrade connectivity in the region
`around' them to a distance that grows with the size of
the hole and the density of faults. Thus, to guarantee
good connectivity between two regions even in the
presence of faults, the intervening network should not
only sport multiple paths, but should also avoid too many
large holes. Our result essentially characterizes networks
where connectivity depends on the `big picture'
structure of the network, and not on the local `noise'
caused by faulty or imprecisely positioned nodes and
links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ad hoc; connectivity; fault; network; percolation;
random; resilient; topology",
}
@Article{Dong:2006:PCT,
author = "Qunfeng Dong and Suman Banerjee and Jia Wang and
Dheeraj Agrawal and Ashutosh Shukla",
title = "Packet classifiers in ternary {CAMs} can be smaller",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "311--322",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140313",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Serving as the core component in many packet
forwarding, differentiating and filtering schemes,
packet classification continues to grow its importance
in today's IP networks. Currently, most vendors use
Ternary CAMs (TCAMs) for packet classification. TCAMs
usually use brute-force parallel hardware to
simultaneously check for all rules. One of the
fundamental problems of TCAMs is that they suffer from
range specifications because rules with range
specifications need to be translated into multiple TCAM
entries. Hence, the cost of packet classification will
increase substantially as the number of TCAM entries
grows. As a result, network operators hesitate to
configure packet classifiers using range
specifications. In this paper, we optimize packet
classifier configurations by identifying semantically
equivalent rule sets that lead to a reduced number of
TCAM entries when represented in hardware. In
particular, we develop a number of effective
techniques, which include: trimming rules, expanding
rules, merging rules, and adding rules. Compared with
previously proposed techniques which typically require
modifications to the packet processor hardware, our
scheme does not require any hardware modification,
which is highly preferred by ISPs. Moreover, our scheme
is complementary to previous techniques in that those
techniques can be applied on the rule sets optimized by
our scheme. We evaluate the effectiveness and potential
of the proposed techniques using extensive experiments
based on both real packet classifiers managed by a
large tier-1 ISP and synthetic data generated randomly.
We observe a significant reduction in the number of TCAM
entries that are needed to represent the optimized
packet classifier configurations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet classification; semantic equivalence; ternary
CAM",
}
@Article{Zhao:2006:DNS,
author = "Qi Zhao and Jun Xu and Zhen Liu",
title = "Design of a novel statistics counter architecture with
optimal space and time efficiency",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "323--334",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140314",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of how to efficiently maintain a large
number (say millions) of statistics counters that need
to be incremented at very high speed has received
considerable research attention recently. This problem
arises in a variety of router management algorithms and
data streaming algorithms, where a large array of
counters is used to track various network statistics
and to implement various counting sketches
respectively. While fitting these counters entirely in
SRAM meets the access speed requirement, a large amount
of SRAM may be needed with a typical counter size of 32
or 64 bits, and hence the high cost. Solutions proposed
in recent works have used hybrid architectures where
small counters in SRAM are incremented at high speed,
and occasionally written back ('flushed') to larger
counters in DRAM. Previous solutions have used complex
schedulers with tree-like or heap data structures to
pick which counters in SRAM are about to overflow, and
flush them to the corresponding DRAM counters. In this
work, we present a novel hybrid SRAM/DRAM counter
architecture that consumes much less SRAM and has a
much simpler design of the scheduler than previous
approaches. We show, in fact, that our design is
optimal in the sense that for a given speed difference
between SRAM and DRAM, our design uses the
theoretically minimum number of bits per counter in
SRAM. Our design uses a small write-back buffer (in
SRAM) that stores indices of the overflowed counters
(to be flushed to DRAM) and an extremely simple
randomized algorithm to statistically guarantee that
SRAM counters do not overflow in bursts large enough to
fill up the write-back buffer even in the worst case.
The statistical guarantee of the algorithm is proven
using a combination of worst case analysis for
characterizing the worst case counter increment
sequence and a new tail bound theorem for bounding the
probability of filling up the write-back buffer.
Experiments with real Internet traffic traces show that
the buffer size required in practice is significantly
smaller than needed in the worst case.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; router; statistics counter",
}
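
The Zhao, Xu, and Liu entry above keeps small counters in SRAM, queues
overflowed counter indices in a write-back buffer, and flushes them to
full-width DRAM counters. A toy simulation of that data path follows
(counter width, buffer discipline, and flush ratio are hypothetical; the
paper's randomized scheduling and its analysis are not reproduced).

import random
from collections import deque

BITS = 4                          # width of each small on-chip counter (hypothetical)
SMALL_MAX = (1 << BITS) - 1
small = [0] * 1024                # fast (SRAM) counters
large = [0] * 1024                # slow, full-width (DRAM) counters
buffer = deque()                  # write-back buffer of overflowed counter indices

def increment(i):
    small[i] += 1
    if small[i] > SMALL_MAX:      # overflow: schedule a flush to the large counter
        buffer.append(i)
        small[i] = 0

def flush_one():
    if buffer:
        large[buffer.popleft()] += SMALL_MAX + 1

for _ in range(200_000):          # hypothetical skewed increment stream
    i = random.randrange(64) if random.random() < 0.8 else random.randrange(1024)
    increment(i)
    flush_one()                   # one slow flush per fast increment (illustrative ratio)

while buffer:                     # drain pending flushes before reading final counts
    flush_one()
print("counter 7 =", large[7] + small[7])
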
@Article{Kumar:2006:FMP,
author = "Rakesh Kumar and David D. Yao and Amitabha Bagchi and
Keith W. Ross and Dan Rubenstein",
title = "Fluid modeling of pollution proliferation in {P2P}
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "335--346",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140316",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "P2P systems are highly vulnerable to pollution attacks
in which attackers inject multiple versions of
corrupted content into the system, which is then
further proliferated by unsuspecting users. However, to
our knowledge, there are no closed-form solutions that
describe this phenomenon, nor are there models that
describe how the injection of multiple versions of
corrupted content impacts a client's ability to receive
a valid copy. In this paper we develop a suite of fluid
models that model pollution proliferation in P2P
systems. These fluid models lead to systems of
non-linear differential equations. We obtain
closed-form solutions for some of these models; for the
remaining models, we efficiently solve the differential
equations numerically. The models capture
a variety of user behaviors, including propensity for
popular versions, abandonment after repeated failure to
obtain a good version, freeloading, and local version
blacklisting. Our analysis reveals intelligent
strategies for attackers as well as strategies for
clients seeking to recover non-polluted content within
large-scale P2P networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluid model; Markov chain; P2P; pollution attack",
}
@Article{Li:2006:FSS,
author = "Kang Li and Zhenyu Zhong",
title = "Fast statistical spam filter by approximate
classifications",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "347--358",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140317",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Statistical-based Bayesian filters have become a
popular and important defense against spam. However,
despite their effectiveness, their greater processing
overhead can prevent them from scaling well for
enterprise-level mail servers. For example, the
dictionary lookups that are characteristic of this
approach are limited by the memory access rate,
and are therefore relatively insensitive to increases in CPU
speed. We address this scaling issue by proposing an
acceleration technique that speeds up Bayesian filters
based on approximate classification. The approximation
uses two methods: hash-based lookup and lossy encoding.
Lookup approximation is based on the popular Bloom
filter data structure with an extension to support
value retrieval. Lossy encoding is used to further
compress the data structure. While both methods
introduce additional errors to a strict Bayesian
approach, we show how the errors can be both minimized
and biased toward a false negative classification. We
demonstrate a 6x speedup over two well-known spam
filters (bogofilter and qsf) while achieving an
identical false positive rate and similar false
negative rate to the original filters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximation; Bayesian filter; bloom filter; SPAM",
}
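
The Li and Zhong entry above speeds up a Bayesian spam filter by
replacing exact dictionary lookups with hash-based lookup plus lossy
encoding, with errors biased toward false negatives. A minimal sketch of
that idea (the table size, hash count, and quantization below are
hypothetical choices, not the authors' exact data structure):

import hashlib

SIZE = 1 << 16                       # number of buckets (hypothetical)
LEVELS = 16                          # lossy encoding: quantize scores to 4 bits
table = [0] * SIZE

def _hashes(token, k=3):
    for salt in range(k):
        h = hashlib.sha1(f"{salt}:{token}".encode()).digest()
        yield int.from_bytes(h[:4], "big") % SIZE

def insert(token, spam_prob):
    q = max(1, round(spam_prob * (LEVELS - 1)))   # quantized, nonzero marks presence
    for idx in _hashes(token):
        # Keep the minimum, so collisions can only lower the reported score
        # (biasing errors toward false negatives).
        table[idx] = q if table[idx] == 0 else min(table[idx], q)

def lookup(token):
    vals = [table[idx] for idx in _hashes(token)]
    if 0 in vals:                    # definitely never inserted
        return None
    return min(vals) / (LEVELS - 1)  # approximate (possibly collision-lowered) score

insert("viagra", 0.97)               # hypothetical training data
insert("meeting", 0.10)
print(lookup("viagra"), lookup("meeting"), lookup("unknown"))
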
@Article{Kola:2006:QAB,
author = "George Kola and Mary K. Vernon",
title = "{QuickProbe}: available bandwidth estimation in two
roundtrips",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "359--360",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140319",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "available bandwidth estimation",
}
@Article{Kaushik:2006:FTW,
author = "Neena R. Kaushik and Silvia M. Figueira and Stephen A.
Chiappari",
title = "Flexible time-windows for advance reservation in
{LambdaGrids}",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "361--362",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140320",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advance-reservation requests are an essential feature
of LambdaGrids, where resources may need to be
co-allocated at pre-determined times. In this paper, we
discuss unconstrained advance reservations, which use
flexible time-windows to lower blocking probability
and, consequently, increase resource utilization. We
claim, and show using simulations, that in a
first-come-first-served advance-reservation model without
time-slots, the minimum window size that theoretically
brings the blocking probability to 0 equals the waiting
time in a queue-based on-demand model. We also show, with
simulations, the window sizes that bring the blocking
probability to its minimum for an advance-reservation
model with time-slots.
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "advance reservation; flexible time-windows;
LambdaGrids; scheduling",
}
@Article{Verbowski:2006:APS,
author = "Chad Verbowski and Emre Kiciman and Brad Daniels and
Yi-Min Wang and Roussi Roussev and Shan Lu and Juhan
Lee",
title = "Analyzing persistent state interactions to improve
state management",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "363--364",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file; persistent state; registry; state management;
system management; trace",
}
@Article{Verloop:2006:DOS,
author = "Maaike Verloop and Rudesindo N{\'u}{\~n}ez-Queija and
Sem Borst",
title = "Delay-optimal scheduling in bandwidth-sharing
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "365--366",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "alpha-fair strategies; bandwidth-sharing networks;
delay optimization",
}
@Article{Menth:2006:TPP,
author = "Michael Menth and Robert Henjes and Christian Zepfel
and Sebastian Gehrsitz",
title = "Throughput performance of popular {JMS} servers",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "367--368",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Java Messaging Service (JMS) facilitates
communication among distributed software components
according to the publish/subscribe principle. If the
subscribers install filter rules on the JMS server, JMS
can be used as a message routing platform, but it is
not clear whether its message throughput is
sufficiently high to support large-scale systems. In
this paper, we investigate the capacity of three high
performance JMS server implementations: FioranoMQ,
SunMQ, and WebsphereMQ. In contrast to other studies,
we focus on the message throughput in the presence of
filters and show that filtering reduces the performance
significantly. We present models for the message
processing time of each server and validate them by
measurement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "java messaging service; publish/subscribe; server
performance",
}
@Article{Garg:2006:OHR,
author = "Rahul Garg and Yogish Sabharwal",
title = "Optimizing the {HPCC} randomaccess benchmark on {Blue
Gene\slash L} supercomputer",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "369--370",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140324",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of supercomputers has traditionally
been evaluated using the LINPACK benchmark [3], which
stresses only the floating point units without
significantly loading the memory or the network
subsystems.\par
The HPC Challenge (HPCC) benchmark suite is being
proposed as an alternative to evaluate the performance
of supercomputers. It consists of seven benchmarks,
each designed to measure a specific aspect of the
system performance. These benchmarks include (i) the
high performance LINPACK (HPL) (ii) DGEMM, which
measures the floating point rate of execution of double
precision real matrix-matrix multiplication, (iii)
STREAM that measures sustainable memory bandwidth and
the corresponding computation rate for four simple
vector kernels, namely, copy, scale, add and triad (iv)
PTRANS that exercises the network by taking parallel
transpose of a large distributed matrix (v)
Randomaccess that measures the rate of integer updates
to random memory locations (vi) FFT which measures the
floating point rate of execution of a double precision
complex one-dimensional Discrete Fourier Transform
(DFT) and (vii) communication bandwidth and latency
which measures latency and bandwidth of a number of
simultaneous communication patterns.\par
In this paper we outline the optimization techniques
used to obtain the presently best reported performance
of the HPCC Randomaccess benchmark on the Blue Gene/L
supercomputer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmarks; high performance computing; randomaccess",
}
@Article{Piotrowski:2006:PPS,
author = "Tadeusz Piotrowski and Suman Banerjee and Sudeept
Bhatnagar and Samrat Ganguly and Rauf Izmailov",
title = "Peer-to-peer streaming of stored media: the indirect
approach",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "371--372",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140325",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "media-streaming; overlays; peer-to-peer",
}
@Article{Dholakia:2006:ANI,
author = "Ajay Dholakia and Evangelos Eleftheriou and Xiao-Yu Hu
and Ilias Iliadis and Jai Menon and KK Rao",
title = "Analysis of a new intra-disk redundancy scheme for
high-reliability {RAID} storage systems in the presence
of unrecoverable errors",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "373--374",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140326",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today's data storage systems are increasingly adopting
low-cost disk drives that have higher capacity but
lower reliability, leading to more frequent rebuilds
and to a higher risk of unrecoverable media errors. We
propose a new XOR-based intra-disk redundancy scheme,
called interleaved parity check (IPC), to enhance the
reliability of RAID systems that incurs only negligible
I/O performance degradation. The proposed scheme
introduces an additional level of redundancy inside
each disk, on top of the RAID redundancy across
multiple disks. The RAID parity provides protection
against disk failures, while the proposed scheme aims
to protect against media-related unrecoverable errors.
We develop a new model capturing the effect of
correlated unrecoverable sector errors and subsequently
use it to analyze the proposed scheme as well as the
traditional redundancy schemes based on Reed--Solomon
(RS) codes and single-parity-check (SPC) codes. We
derive closed-form expressions for the mean time to
data loss (MTTDL) of RAID 5 and RAID 6 systems in the
presence of unrecoverable errors and disk failures. We
then combine these results for a comprehensive
characterization of the reliability of RAID systems
that incorporate the proposed IPC redundancy scheme.
Our results show that in the practical case of
correlated errors, the proposed scheme provides the
same reliability as the optimum, albeit more complex, RS
coding scheme. Finally, the throughput performance of
incorporating the intra-disk redundancy on various RAID
systems is evaluated by means of event-driven
simulations. A detailed description of these
contributions is given in [1].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file and I/O systems; RAID; reliability analysis;
stochastic modeling",
}
@Article{Bower:2006:AAV,
author = "Fred A. Bower and Derek Hower and Mahmut Yilmaz and
Daniel J. Sorin and Sule Ozev",
title = "Applying architectural vulnerability {Analysis} to
hard faults in the microprocessor",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "375--376",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140327",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present a new metric, Hard-Fault
Architectural Vulnerability Factor (H-AVF), to allow
designers to more effectively compare alternate
hard-fault tolerance schemes. In order to provide
intuition on the use of H-AVF as a metric, we evaluate
fault-tolerant level-1 data cache and register file
implementations using error correcting codes and a
fault-tolerant adder using triple-modular redundancy
(TMR). For each of the designs, we compute its H-AVF.
We then use these H-AVF values in conjunction with
other properties of the design, such as die area and
power consumption, to provide composite metrics. The
derived metrics provide simple, quantitative measures
of the cost-effectiveness of the evaluated designs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer architecture; hard-fault tolerance;
reliability",
}
@Article{Broberg:2006:MFM,
author = "James A. Broberg and Zhen Liu and Cathy H. Xia and Li
Zhang",
title = "A multicommodity flow model for distributed stream
processing",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "377--378",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140328",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed algorithms; multicommodity flow; potential
function; stream processing",
}
@Article{Bonald:2006:GEF,
author = "T. Bonald",
title = "{Guest Editor}'s foreword",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "2--2",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168136",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance 2005, the 24-th International Symposium on
Computer Performance, Modeling, Measurements and
Evaluation, was held in Juan-les-Pins, France, on
October 3-7, 2005. In addition to the main technical
program, a poster session was organized so that ongoing
or recent research work could be presented and
discussed in an informal setting. Submissions were
solicited as extended abstracts and reviewed by members
of the poster committee. A total of 12 posters were
selected for presentation during the conference. This
special issue of {\em Performance Evaluation Review\/}
consists of the corresponding extended abstracts, which
cover a wide range of topics in the area of performance
evaluation, analytical modeling and simulation of
computer systems and communication networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hautphenne:2006:EPP,
author = "Sophie Hautphenne and Kenji Leibnitz and Marie-Ange
Remiche",
title = "Extinction probability in peer-to-peer file
diffusion",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "3--4",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168137",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent measurement studies [8] have shown that
peer-to-peer (P2P) file sharing applications are the
major traffic source in the Internet. P2P applications,
such as eDonkey, Kazaa, or BitTorrent, form overlay
networks at the application layer and allow their peers
to download and share files with other peers in a
highly distributed way. As a consequence, peers act
simultaneously as both clients and servers. For a
comprehensive survey of P2P technology, we refer to
[7].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mundinger:2006:APPa,
author = "J. Mundinger and R. R. Weber and G. Weiss",
title = "Analysis of peer-to-peer file dissemination amongst
users of different upload capacities",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "5--6",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168138",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, overlay networks have proven an
effective way of disseminating a file from a single
source to a group of end users via the Internet. A
number of algorithms and protocols have been suggested,
implemented and studied. In particular, much attention
has been given to peer-to-peer (P2P) systems such as
BitTorrent [2], Slurpie [10], SplitStream [1] and
Bullet [5]. The key idea is that the file is divided
into $M$ parts of equal size and that a given user may
download any one of these either from the server or
from a peer who has previously downloaded it. More
recently, a scheme based on network coding [3] has been
suggested. Here, users download linear combinations of
file parts rather than individual file parts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Besson:2006:GSE,
author = "Emmanuel Besson and Aline Gouget and Herv{\'e}
Sibert",
title = "The {GAIA} sensor: an early {DDoS} detection tool",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "7--8",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168139",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed Denial of Service (DDoS) attacks are a
major network security threat. Most recent host-based
DDoS detection mechanisms are dedicated to a particular
set of attacks, focusing either on the recent dynamics
of the traffic or on its long-range dependence. We
propose a DDoS early detection component based on
anomaly detection that combines static and dynamic
behavior analysis, and we report experimental results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hardy:2006:PCR,
author = "G. Hardy and C. Lucet and N. Limnios",
title = "Probability of connection in regular stochastic
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "9--10",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168140",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we report experiments we did on network
reliability with the BDD-based exact method we present
in [1].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Busic:2006:BTS,
author = "Ana Bu{\v{s}}i{\'c} and Jean-Michel Fourneau",
title = "Bounding transient and steady-state dependability
measures through algorithmic stochastic comparison",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "11--12",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168141",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We are interested in bounding dependability measures
like point and steady-state availability and
reliability of systems modelled by very large Markov
chains which are not numerically tractable. We suppose
that the state space is divided into two classes, UP
(system is operational) and DOWN states. The
reliability at time $t$ is defined as the probability
that the system has always been operational between 0
and $t$. The point availability is the probability that
the system is operational at time $t$, and the
steady-state availability is the limit, if it exists,
of this probability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bossie:2006:CHT,
author = "Craig Bossie and Pierre M. Fiorini",
title = "On checkpointing and heavy-tails in unreliable
computing environments",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "13--15",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168142",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we discuss checkpointing issues that
should be considered whenever jobs execute in
unreliable computing environments. Specifically, we
show that if proper checkpointing procedures are not
implemented, then under certain conditions,
job completion time distributions exhibit properties of
{\em heavy-tail\/} or {\em power-tail\/} distributions
(hereafter referred to as power-tail distributions
(PT)), which can lead to highly-variable and long
completion times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mickens:2006:IDS,
author = "James W. Mickens and Brian D. Noble",
title = "Improving distributed system performance using machine
availability prediction",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "16--18",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168143",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a distributed system, a set of networked machines
provides a highly available service to remote clients.
Traditional distributed systems like AFS [2] make a
clear distinction between clients and servers. Client
machines may be poorly administered, cheaply
constructed, often offline, and possibly malicious. In
contrast, servers are expected to be well-administered
and almost always online. Highly available servers
ensure the availability and reliability of the
distributed service.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chydzinski:2006:BOC,
author = "Andrzej Chydzinski",
title = "Buffer overflow calculations in a batch arrival
queue",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "19--21",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168144",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper numerical calculations of the buffer
overflow time in a batch arrival queueing system are
presented. The results indicate that an autocorrelated
input stream and heavy-tailed batch size or service time
distributions have a critical influence on the frequency
of buffer overflows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasce:2006:ECP,
author = "Daniel A. Menasc{\'e} and Vasudeva Akula",
title = "Evaluating caching policies for online auctions",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "22--23",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168145",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Auction sites have grown rapidly in the last couple of
years and recent statistics indicate that eBay carries
about 50 million items for sale at any time on its site
[2]. Our previous work showed that the workload of
online auction sites is substantially different from
that of online retailers and uncovered a plethora of
interesting findings that can be used, among other
things, to improve the performance of online auction
sites [1, 3]: (i) A very large percentage of auctions
have a relatively low number of bids and bidders and a
very small percentage of auctions have a high number of
bids and bidders. (ii) There is some bidding activity
at the beginning stages of an auction. This activity
slows down in the middle and increases considerably
after 90\% of an auction's life time has elapsed. (iii)
Prices rise faster in the first 20\% of an auction's
life time than in the next 70\% of its life time.
However, after the age of an auction reaches 90\%,
prices increase much faster than in the two previous
phases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vincent:2006:PSI,
author = "Jean-Marc Vincent and J{\'e}r{\^o}me Vienne",
title = "Perfect simulation of index based routing queueing
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "24--25",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markovian queueing networks models are widely used for
performance evaluation of computer systems, production
lines, communication networks and so on. Routing
strategies allocate clients to queues after the end of
service. In many situations, such as deterministic,
probabilistic, or state-dependent routing like {\em Join
the shortest queue\/}, the routing function can be
written in terms of the index scheduling functions
introduced in [3, 6].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2006:STQ,
author = "Cheng-Shang Chang and Yi-Ting Chen and Jay Cheng and
Po-Kai Huang and Duan-Shin Lee",
title = "From switching theory to `queueing' theory",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "26--28",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168147",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing theory is generally known as the theory to
study the performance of queues. In this extended
abstract, we are interested in another aspect of
queueing theory, the theory to construct queues. Our
interest in constructing queues originates from optical
packet switching. Traditionally, queues are relatively
cheap to build via electronic memory. However, it is
very costly to convert optical packets into electronic
packets. As such, building optical queues with minimum
complexity has become an important research topic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Giannoulis:2006:CLP,
author = "Anastasios Giannoulis and Konstantinos P. Tsoukatos
and Leandros Tassiulas",
title = "Cross-layer power control in wireless networks",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "29--31",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a power control algorithm that exploits
queue length information to achieve maximum data
throughput in single-hop CDMA wireless networks. The
algorithm operates in real-time, i.e., executes a
single iteration per data transmission. A variant of
the algorithm employing the exponential scheduling rule
steers queue length ratios to desired targets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2006:F,
author = "Mark S. Squillante",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "2--2",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215959",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important role
in research studies to investigate fundamental issues
and tradeoffs at the core of performance problems in
the design and implementation of complex computer
systems, networks and applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nakassis:2006:TPQ,
author = "Anastase Nakassis and Vladimir Marbukh",
title = "Towards power and {QoS} aware wireless networks",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "3--5",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215960",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The paper studies the optimal use of energy in
wireless networking, the feasibility region of tasks
that share a multi-access channel, and efficient
algorithms for determining if a given set of tasks and
resources falls within the feasibility region.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network information theory; Pareto optimality",
}
@Article{Yazici:2006:EPD,
author = "Emine {\c{S}}ule Yazici and Selda
K{\"u}{\c{c}}{\"u}k{\c{c}}if{\c{c}}i and {\"O}znur
{\"O}zkasap and Mine {\c{C}}a{\u{g}}lar",
title = "Exact probability distributions for peer-to-peer
epidemic information diffusion",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "6--8",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215961",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An efficient approach for information diffusion in
distributed systems is to utilize epidemic algorithms
that involve pair-wise propagation of updates. Epidemic
algorithms are fully distributed and randomized
approaches such that every peer in an information
diffusion session picks a (subset of the other) peer(s)
randomly for efficient propagation of updates, through
periodic rounds. The underlying epidemic theory for
biological systems studies the spreading of
infectious diseases through a population [1,2]. When
applied to an information diffusion application, such
protocols have beneficial features such as scalability,
robustness against failures and provision of eventual
consistency. Exact as well as asymptotic distributions
have been studied for different epidemic models in
[3,4]. In contrast to such previous studies, we
investigate variations of the epidemic algorithms used
in the context of distributed information diffusion and
derive exact diffusion probabilities for them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luan:2006:MOC,
author = "Hao Luan and Danny H. K. Tsang and Kin Wah Kwong",
title = "Media overlay construction via a {Markov} chain {Monte
Carlo} method",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "9--11",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215962",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the fairness issue of BT
and tackle the problem with a general framework using
proactive topology adaptations. The topology formed
possesses a special link-level homogeneity property
with each peer having the same capacity per out-degree
value. Such property guarantees that each directional
link has the same uploading bandwidth. Together with
the Tit-for-Tat policy, peers upload and download at
the same rate over each connection and therefore
achieve fairness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mundinger:2006:APPb,
author = "Jochen Mundinger and Richard Weber and Gideon Weiss",
title = "Analysis of peer-to-peer file dissemination",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "12--14",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215963",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, overlay networks have proven a
popular way of disseminating potentially large files
from a single server $S$ to a potentially large group
of $N$ end users via the Internet. A number of
algorithms and protocols have been suggested,
implemented and studied. In particular, much attention
has been given to peer-to-peer (P2P) systems such as
BitTorrent [5], Slurpie [20], SplitStream [4], Bullet
[11] and Avalanche [6]. The key idea is that the file
is divided into $M$ parts of equal size and that a
given user may download any one of these --- or, for
Avalanche, linear combinations of these --- either from
the server or from a peer who has previously downloaded
it.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raz:2006:TMS,
author = "David Raz and Hanoch Levy and Benjamin Avi-Itzhak",
title = "On the twin measure and system predictability and
fairness",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "15--17",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215964",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two identical customers with deterministically
identical service times arrive at a queueing system
simultaneously (Twins), but leave the system 2 hours
apart. Is their sojourn time predictable? Is the system
fair? We propose a novel measure based on the principle
that in a predictable and fair system, `twin' customers
should not depart the system very far apart. We analyze
this measure for a number of common service policies,
compare the results to those of other recently proposed
fairness and predictability approaches, and discuss the
measure's usefulness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brown:2006:CFP,
author = "Patrick Brown",
title = "Comparing {FB} and {PS} scheduling policies",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "18--20",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215965",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we obtain new results concerning the
expected response time of the foreground-background
(FB) scheduling discipline and its comparison with
processor sharing (PS). Some results previously derived
for job sizes with finite second moment or bounded
sizes are extended to infinite second moments. New
bounds and asymptotic results are also derived. We show
that for job sizes with infinite second moment large
jobs may benefit from the FB scheduling discipline
although this discipline favors short jobs. For certain
distributions all job sizes may even benefit from FB
with respect to PS showing that the performance
benefits obtained by some job sizes need not be
obtained at the expense of others.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wierman:2006:EIS,
author = "Adam Wierman",
title = "On the effect of inexact size information in size
based policies",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "21--23",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215966",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, there have been a number of scheduling
success stories in computer applications. Across a wide
array of applications, the simple heuristic of
`prioritizing small jobs' has been used to reduce user
response times with enormous success. For instance,
variants of Shortest-Remaining-Processing-Time (SRPT)
and Preemptive-Shortest-Job-First (PSJF) have been
suggested for use in web servers [5, 12], wireless
applications [6], and databases [8]. As a result of the
attention given to size based policies by computer
systems researchers, there has been a resurgence in
analytical work studying these policies. However, the
policies studied in theory, e.g. SRPT and PSJF, are
idealized versions of the policies implemented by
practitioners. In particular, the intricacies of
computer systems force the use of complex hybrid
policies in practice, though these more complex
policies are still built around the heuristic of
`prioritizing small jobs.' Thus, there exists a gap
between the results provided by theoretical research
and the needs of practitioners. This gap results from
three primary disconnects between the model studied in
theory and the needs of system designers. First, in
designing systems, the goal is not simply to provide
small response times; other performance measures are
also important. Thus, idealized policies such as SRPT
and PSJF are often tweaked by practitioners to perform
well on secondary performance measures (e.g. fairness
and slowdown) [3, 11, 12]. Second, the overhead
involved in distinguishing between an infinite number
of different priority classes typically causes system
designers to discretize policies such as SRPT and PSJF
so that they use only a small number of priority
classes (5-10) [5, 11]. Third, in many cases
information about the service demands (sizes) of jobs
is inexact. For instance, when serving static content,
web servers have exact knowledge of the sizes of the
files being served, but have inexact knowledge of
network conditions. Thus, the web server only has an
estimate of the true service demand [7, 12].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sheahan:2006:CTD,
author = "Robert Sheahan and Lester Lipsky and Pierre M. Fiorini
and S{\o}ren Asmussen",
title = "On the completion time distribution for tasks that
must restart from the beginning if a failure occurs",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "24--26",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215967",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For many systems, failure is so common that the design
choice of how to deal with it may have a significant
impact on the performance of the system. There are many
specific and distinct failure recovery schemes, but
they can be grouped into three broad classes: {\em
RESUME}, also referred to as preemptive resume (prs),
or check-pointing; {\em REPLACE}, also referred to as
preemptive repeat different (prd); and {\em RESTART},
also referred to as preemptive repeat identical (pri).
The following describes the three recovery schemes: (1)
{\em RESUME:\/} when a task fails, it knows exactly
where it stopped, and can continue from that point when
allowed to resume; (2) {\em REPLACE:\/} when a task
fails, then when it begins processing again, it starts
with a brand new task sampled from the same task time
distribution; and, (3) {\em RESTART:\/} when a task
fails, it loses all that it had acquired up to that
point and must start anew upon continuing later.
This is distinctly different from (2) since the task
must run at least as long as it did before it failed,
whereas a new sample, selected at random, might run for
a shorter or longer time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Silveira:2006:MST,
author = "Fernando Silveira and Edmundo {de Souza e Silva}",
title = "Modeling the short-term dynamics of packet losses",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "27--29",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215968",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Packet loss models play an essential role in computer
networks analysis. Performance evaluation studies often
abstract the loss and delay characteristics of a path
or network with a single end-to-end analytical model.
This model should be able to represent the
characteristics of the path and accurately reproduce
the impact of delay and losses on the studied protocol
while keeping complexity low.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ott:2006:SSP,
author = "Teunis J. Ott and Jason Swanson",
title = "Stationarity of some processes in transport
protocols",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "30--32",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215969",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This note establishes stationarity of a number of
stochastic processes of interest in the study of
Transport Protocols. For many of the processes studied
in this note stationarity had been established before,
but for one class the result is new. For that class, it
was counterintuitive that stationarity was hard to
prove. This note also explains why that class offered
such stiff resistance. The stationarity is proven using
Liapunov functions, without first proving tightness by
proving boundedness of moments. After the 2006 MAMA
workshop simple conditions for existence of such
moments were obtained and were added to this note.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baryshnikov:2006:FDT,
author = "Yuliy Baryshnikov and Ed Coffman and Jing Feng and
Vishal Misra",
title = "Free-Drop {TCP}",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "33--35",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215970",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a new class of TCP congestion control
algorithms that take a non-standard approach: instead
of modifying AIMD parameters or exploiting traffic
measurements, the new protocols modify the rule for
deciding when to cut the congestion window. The class
is defined by an additional window with a packet-count
parameter $w$; the congestion window is reduced by half
when a packet loss is detected, at time $t$ say, if and
only if there has been at least one dropped packet in
the last $w$ packet transmissions prior to time $t$. An
algorithm in the class is called {\em Free-Drop TCP},
since dropped packets are `free' (they do not cause
cuts in the window size) unless they are sufficiently
bursty. We propose this new class as a means to achieve
high utilizations in high bandwidth-delay product
networks with small buffers. We analyze a fluid model
which leads to explicit estimates of the average
throughput for small loss probabilities. We then give
the results of experiments, which show that, relative
to TCP, a family of `shifted' response functions of the
form $ O(1 / \sqrt {p} - \epsilon)$ can be obtained
over a wide range of $p$ by suitably varying $w$.
Potential costs of these increases in throughput are
also examined in terms of coefficients of variation and
Jain's fairness measure. The costs range from
negligible to moderate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carofiglio:2006:ARS,
author = "G. Carofiglio and C. Chiasserini and M. Garetto and E.
Leonardi",
title = "Analysis of route stability under the random direction
mobility model",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "36--38",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215971",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we study the stability of routing paths
in a Mobile Ad-hoc Network (MANET), where links are
subject to failure due to nodes' mobility. We focus on
the Random Direction mobility model, and consider as
metrics of interest the duration and availability of
links and paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Osogami:2006:FPBb,
author = "Takayuki Osogami",
title = "Finding probably best system configurations quickly",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "39--41",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215972",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer systems often have many possible
configurations, and designing a high performance system
often requires selecting the best configuration.
Unfortunately, the performance of complex systems can
often be estimated only via simulations, or with
measurements of real systems. Since longer simulation
times are required to estimate the performance more
accurately, it is often computationally intractable to
estimate the performance of all configurations
accurately via simulations. (Measurements of real
systems can take even longer.)",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yao:2006:AOT,
author = "David D. Yao and Heng-Qing Ye",
title = "Asymptotic optimality of threshold control in a
stochastic network based on a fixed-point
approximation",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "42--44",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215973",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In Li and Yao [5], a stochastic network with
simultaneous resource occupancy is studied, and a
threshold control policy is proposed based on a
fixed-point approximation. Here, we establish the
asymptotic optimality of this control policy under
fluid and diffusion scaling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bayati:2006:OSM,
author = "Mohsen Bayati and Mayank Sharma and Mark S.
Squillante",
title = "Optimal scheduling in a multiserver stochastic
network",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "45--47",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215974",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a fundamental scheduling problem in a
multiserver stochastic network consisting of 2 classes
of customers and 2 classes of servers. Customers of
class $k$ arrive to queue $k$ according to a Poisson
process with rate $ \lambda_k, k = 1, 2$. The service
times of class $k$ customers at class $ \ell $ servers
are i.i.d. following an exponential distribution with
mean $ \mu_{k, \ell }^{-1}, \forall k, \ell = 1, 2$,
where $ 0 < \mu_{1, 1}, \mu_{1, 2}, \mu_{2, 2} < \infty
$ and $ \mu_{2, 1} = 0$. Hence, class 1 customers can
be served at both classes of servers, but class 2
customers can only be served at class 2 servers. A FCFS
queueing discipline is employed at each queue. The
customer arrival and service processes are mutually
independent of each other and of all resource
allocation decisions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elhaddad:2006:ATS,
author = "Mahmoud Elhaddad and Rami Melhem and Taieb Znati",
title = "Analysis of a transmission scheduling algorithm for
supporting bandwidth guarantees in bufferless
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "48--63",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215957",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a network of bufferless packet multiplexers, the
user-perceived capacity of an ingress-egress tunnel
(connection) may degrade quickly with increasing path
length. This is due to the compounding of transmission
blocking probabilities along the path of the
connection, even when the links are not overloaded. In
such an environment, providing users (e.g., client
ISPs) with tunnels of statistically guaranteed
bandwidth may limit the network's connection-carrying
capacity. In this paper, we introduce and analyze a
transmission-scheduling algorithm that employs
randomization and traffic regulation at the ingress,
and batch scheduling at the links. The algorithm
ensures that a fraction of transmissions from each
connection is consistently subject to small blocking
probability at every link, so that these transmissions
are likely to survive long paths. For this algorithm,
we obtain tight bounds on the expectation and tail
probability of the blocking rate of any ingress-egress
connection. We compare the bounds to those obtained
using the FCFS link-scheduling rule. We find that the
proposed scheduling algorithm significantly improves
the network's connection-carrying capacity. In deriving
the desired bounds, we develop an analytic framework
for stochastically comparing network-wide routing and
bandwidth allocation scenarios with respect to blocking
in a packet multiplexer. The framework enables us to
formally characterize the routing and bandwidth
allocation scenarios that maximize the expected
blocking rate along the path of a tagged connection.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2007:F,
author = "Mor Harchol-Balter",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "2--3",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243404",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "I would like to introduce this issue by telling a
story. Sometime back in 1997, I wrote a paper on a new
idea for improving the response times of HTTP requests
at a Web server. The idea was to schedule the HTTP
requests so as to favor requests for small files, in
accordance with the well-known scheduling policy
Shortest Remaining Processing Time (SRPT). The paper
was rejected, for many reasons, but the review that
stuck in my mind was the one that said, {\em `Why is
this person writing about scheduling? Scheduling is
dead.'\/} According to this reviewer, everything that
would ever be known about scheduling was already
described in the beautiful {\em Theory of Scheduling\/}
book, written in 1967, by Conway, Maxwell, and
Miller.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wierman:2007:FC,
author = "Adam Wierman",
title = "Fairness and classifications",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "4--12",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The growing trend in computer systems towards using
scheduling policies that prioritize jobs with small
service requirements has resulted in a new focus on the
fairness of such policies. In particular, researchers
have been interested in whether prioritizing small job
sizes results in large jobs being treated `unfairly.'
However, fairness is an amorphous concept and thus
difficult to define and study. This article provides a
short survey of recent work in this area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boxma:2007:TS,
author = "Onno Boxma and Bert Zwart",
title = "Tails in scheduling",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "13--20",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243406",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper gives an overview of recent research on the
impact of scheduling on the tail behavior of the
response time of a job. We cover preemptive and
non-preemptive scheduling disciplines, consider
light-tailed and heavy-tailed distributions, and
discuss optimality properties. The focus is on results,
intuition and insight rather than methods and
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Biersack:2007:SP,
author = "Ernst W. Biersack and Bianca Schroeder and Guillaume
Urvoy-Keller",
title = "Scheduling in practice",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "21--28",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243407",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In queueing theory, it has been known for a long time
that the scheduling policy used in a system greatly
impacts user-perceived performance. For example, it has
been proven in the 1960s that size-based scheduling
policies that give priority to short jobs are optimal
with respect to mean response time. Yet, virtually no
systems today implement these policies. One reason is
that real systems are significantly more complex than a
theoretical M/M/1 or M/G/1 queue and it is not obvious
how to implement some of these policies in practice.
Another reason is that there is a fear that the big
jobs will `starve', or be treated unfairly as compared
to Processor-Sharing (PS). In this article we show,
using two important real world applications, that
size-based scheduling can be used in practice to
greatly improve mean response times in real systems,
without causing unfairness or starvation. The two
applications we consider are connection scheduling in
web servers and packet scheduling in network routers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bonald:2007:SNT,
author = "Thomas Bonald and James Roberts",
title = "Scheduling network traffic",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "29--35",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We discuss the potential of packet scheduling as a
means to control traffic and improve performance for
both wired and wireless links. Using simple queuing
models that take into account the random nature of
traffic, we draw practical conclusions about the
expected gains and limits of scheduling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bandwidth sharing; scheduling; service
differentiation",
}
@Article{Aalto:2007:BPS,
author = "Samuli Aalto and Urtzi Ayesta and Sem Borst and Vishal
Misra and Rudesindo N{\'u}{\~n}ez-Queija",
title = "Beyond processor sharing",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "36--43",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243409",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While the (Egalitarian) Processor-Sharing (PS)
discipline offers crucial insights in the performance
of fair resource allocation mechanisms, it is
inherently limited in analyzing and designing
differentiated scheduling algorithms such as Weighted
Fair Queueing and Weighted Round-Robin. The
Discriminatory Processor-Sharing (DPS) and Generalized
Processor-Sharing (GPS) disciplines have emerged as
natural generalizations for modeling the performance of
such service differentiation mechanisms. A further
extension of the ordinary PS policy is the Multilevel
Processor-Sharing (MLPS) discipline, which has captured
a pivotal role in the analysis, design and
implementation of size-based scheduling strategies. We
review various key results for DPS, GPS and MLPS
models, highlighting to what extent these disciplines
inherit desirable properties from ordinary PS or are
capable of delivering service differentiation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic analysis; delay minimization;
discriminatory processor sharing; generalized processor
sharing; insensitivity; multilevel processor sharing;
queue length; service differentiation; size-based
scheduling; slowdown; sojourn time; workload",
}
@Article{Squillante:2007:SAM,
author = "Mark S. Squillante",
title = "Stochastic analysis of multiserver systems",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "44--51",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243410",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an overview of research in the
stochastic analysis of multiserver systems, where
scheduling often plays a critical role. Our primary
focus is on the stochastic analysis and optimization of
multiserver systems in general, since most of this
research directly investigates scheduling issues and
all of this research provides the methods and results
that have been and will continue to be used to study
existing and future multiserver scheduling issues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pruhs:2007:COS,
author = "Kirk Pruhs",
title = "Competitive online scheduling for server systems",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "52--58",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243411",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our goal here is to illustrate the competitive online
scheduling research community's approach to online
server scheduling problems by enumerating some of the
results obtained for problems related to response and
slowdown, and by explaining some of the standard
analysis techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2007:AMJ,
author = "Hui Li and Michael Muskulus",
title = "Analysis and modeling of job arrivals in a production
grid",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "59--70",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243402",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an initial analysis of job
arrivals in a production data-intensive Grid and
investigate several traffic models for the interarrival
time processes. Our analysis focuses on the heavy-tail
behavior and autocorrelations, and the modeling is
carried out at three different levels: {\em Grid,
Virtual Organization (VO)}, and {\em region}. A set of
{\em $m$-state Markov modulated Poisson processes
(MMPP)\/} is investigated, while {\em Poisson
processes\/} and {\em hyperexponential renewal
processes\/} are evaluated for comparison studies. We
apply the {\em transportation distance\/} metric from
dynamical systems theory to further characterize the
differences between the data trace and the simulated
time series, and estimate errors by {\em
bootstrapping}. The experimental results show that
MMPPs with a certain number of states are successful to
a certain extent in simulating the job traffic at
different levels, fitting both the interarrival time
distribution and the autocorrelation function. However,
MMPPs are not able to match the autocorrelations for
certain VOs, in which strong deterministic
semi-periodic patterns are observed. These patterns are
further characterized using different representations.
Future work is needed to model both deterministic and
stochastic components in order to better capture the
correlation structure in the series.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kadayif:2007:MID,
author = "Ismail Kadayif and Mahmut Kandemir",
title = "Modeling and improving data cache reliability: 1",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "12--12",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254884",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Soft errors arising from energetic particle strikes
pose a significant reliability concern for computing
systems, especially for those running in noisy
environments. Technology scaling and aggressive leakage
control mechanisms make the problem caused by these
transient errors even more severe. Therefore, it is
very important to employ reliability enhancing
mechanisms in processor/memory designs to protect them
against soft errors. To do so, we first need to model
soft errors, and then study cost/reliability tradeoffs
among various reliability enhancing techniques based on
the model so that system requirements could be
met.\par
Since cache memories take the largest fraction of
on-chip real estate today and their share is expected
to continue to grow in future designs, they are more
vulnerable to soft errors, as compared to many other
components of a computing system. In this paper, we
first focus on a soft error model for L1 data caches,
and then explore different reliability enhancing
mechanisms. More specifically, we define a metric
called AVFC (Architectural Vulnerability Factor for
Caches), which represents the probability with which a
fault in the cache can be visible in the final output
of the program. Based on this model, we then propose
three architectural schemes for improving reliability
in the presence of soft errors. Our first scheme
prevents an error from propagating to the lower levels
in the memory hierarchy by not forwarding the
unmodified data words of a dirty cache block to the L2
cache when the dirty block is to be replaced. The
second proposed scheme selectively invalidates cache
blocks to reduce their vulnerable periods, decreasing
their chances of catching any soft errors. Based on the
AVFC metric, our experimental results show that these
two schemes are very effective in alleviating soft
errors in the L1 data cache. Specifically, by using our
first scheme, it is possible to improve the AVFC metric
by 32\% without any performance loss. On the other
hand, the second scheme enhances the AVFC metric
between 60\% and 97\%, at the cost of a performance
degradation which varies from 0\% to 21.3\%, depending
on how aggressively the cache blocks are invalidated.
To reduce the performance overhead caused by cache
block invalidation, we also propose a third scheme
which tries to bring a fresh copy of the invalidated
block into the cache via prefetching. Our experimental
results indicate that this scheme can reduce the
performance overheads to less than 1\% for all
applications in our experimental suite, at the cost of
giving up a tolerable portion of the reliability
enhancement the second scheme achieves.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data caches; data integrity; reliability; soft errors;
vulnerability factors",
}
@Article{Gulati:2007:PAC,
author = "Ajay Gulati and Arif Merchant and Peter J. Varman",
title = "{pClock}: an arrival curve based approach for {QoS}
guarantees in shared storage systems",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "13--24",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254885",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Storage consolidation is becoming an attractive
paradigm for data organization because of the economies
of sharing and the ease of centralized management.
However, sharing of resources is viable only if
applications can be isolated from each other. This work
targets the problem of providing performance guarantees
to an application irrespective of the behavior of other
workloads. Application requirements are represented in
terms of the average throughput, latency and maximum
burst size. Most earlier schemes only do weighted
bandwidth allocation; schemes that provide control of
latency either cannot handle bursts or penalize
applications for their own prior behavior, such as
using spare capacity.\par
Our algorithm pClock is based on arrival curves that
intuitively capture the bandwidth and burst
requirements of applications. We show analytically that
an application following its arrival curve never misses
its deadline. We have implemented pClock both in
DiskSim and as a module in the Linux kernel 2.6. Our
evaluation shows three important features of pClock:
(1) benefits over existing algorithms; (2) efficient
performance isolation and burst handling; and (3) the
ability to allocate spare capacity either to speed up
some applications or to a background utility, such as
backup. pClock can be efficiently implemented in a
system without much overhead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "burst handling; fair scheduling; QoS; real time
guarantees; resource allocation; storage performance
virtualization",
}
@Article{Iyer:2007:QPA,
author = "Ravi Iyer and Li Zhao and Fei Guo and Ramesh Illikkal
and Srihari Makineni and Don Newell and Yan Solihin and
Lisa Hsu and Steve Reinhardt",
title = "{QoS} policies and architecture for cache\slash memory
in {CMP} platforms",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "25--36",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254886",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As we enter the era of CMP platforms with multiple
threads/cores on the die, the diversity of the
simultaneous workloads running on them is expected to
increase. The rapid deployment of virtualization as a
means to consolidate workloads on to a single platform
is a prime example of this trend. In such scenarios,
the quality of service (QoS) that each individual
workload gets from the platform can widely vary
depending on the behavior of the simultaneously running
workloads. While the number of cores assigned to each
workload can be controlled, there is no hardware or
software support in today's platforms to control
allocation of platform resources such as cache space
and memory bandwidth to individual workloads. In this
paper, we propose a QoS-enabled memory architecture for
CMP platforms that addresses this problem. The
QoS-enabled memory architecture enables more cache
resources (i.e. space) and memory resources (i.e.
bandwidth) for high priority applications based on
guidance from the operating environment. The
architecture also allows dynamic resource reassignment
during run-time to further optimize the performance of
the high priority application with minimal degradation
to low priority. To achieve these goals, we will
describe the hardware/software support required in the
platform as well as the operating environment (O/S and
virtual machine monitor). Our evaluation framework
consists of detailed platform simulation models and a
QoS-enabled version of Linux. Based on evaluation
experiments, we show the effectiveness of a QoS-enabled
architecture and summarize key findings/trade-offs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache/memory; CMP; performance; QoS; quality of
service; resource sharing principles; service level
agreements",
}
@Article{Mesnier:2007:MRF,
author = "Michael P. Mesnier and Matthew Wachs and Raja R.
Sambasivan and Alice X. Zheng and Gregory R. Ganger",
title = "Modeling the relative fitness of storage",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "37--48",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254887",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Relative fitness is a new black-box approach to
modeling the performance of storage devices. In
contrast with an absolute model that predicts the
performance of a workload on a given storage device, a
relative fitness model predicts performance {\em
differences\/} between a pair of devices. There are two
primary advantages to this approach. First, because a
relative fitness model is constructed for a device pair,
the application-device feedback of a closed workload
can be captured (e.g., how the I/O arrival rate changes
as the workload moves from device A to device B).
Second, a relative fitness model allows performance and
resource utilization to be used in place of workload
characteristics. This is beneficial when workload
characteristics are difficult to obtain or concisely
express (e.g., rather than describe the spatio-temporal
characteristics of a workload, one could use the
observed cache behavior of device A to help predict the
performance of B).\par
This paper describes the steps necessary to build a
relative fitness model, with an approach that is
general enough to be used with any black-box modeling
technique. We compare relative fitness models and
absolute models across a variety of workloads and
storage devices. On average, relative fitness models
predict bandwidth and throughput within 10--20\% and can
reduce prediction error by as much as a factor of two
when compared to absolute models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "black-box; CART; modeling; storage",
}
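
The relative-fitness idea above admits a small illustration: for a device pair
(A, B), the model that predicts B's performance may take A's measured
performance as an input feature, in place of hard-to-express workload
characteristics. The sketch below uses synthetic data and a CART-style
regression tree (cf. the entry's CART keyword); the feature names, data, and
scikit-learn usage are illustrative assumptions, not the authors' setup.

import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(0)
n = 500
read_fraction = rng.uniform(0, 1, n)           # workload characteristic
request_kb = rng.uniform(4, 256, n)            # workload characteristic
perf_on_A = 200 * read_fraction + 0.5 * request_kb + rng.normal(0, 5, n)
perf_on_B = 0.8 * perf_on_A + 30 + rng.normal(0, 5, n)   # made-up device B

# Device A's measured performance is itself an input feature for device B.
X = np.column_stack([read_fraction, request_kb, perf_on_A])
model = DecisionTreeRegressor(max_depth=6).fit(X[:400], perf_on_B[:400])
pred = model.predict(X[400:])
print("median absolute error:", np.median(np.abs(pred - perf_on_B[400:])))
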
@Article{Wen:2007:FFI,
author = "Zhihua Wen and Sipat Triukose and Michael Rabinovich",
title = "Facilitating focused {Internet} measurements",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "49--60",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254889",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes our implementation of and initial
experiences with DipZoom (for `Deep Internet
Performance Zoom'), a novel approach to provide
focused, on-demand Internet measurements. Unlike
existing approaches that face a difficult challenge of
building a measurement platform with sufficiently
diverse measurements and measuring hosts, DipZoom
implements a matchmaking service instead, which uses
P2P concepts to bring together experimenters in need of
measurements with external measurement providers.
DipZoom offers the following two main contributions.
First, since it is just a facilitator for an open
community of participants, it promises unprecedented
availability of diverse measurements and measuring
points. Second, it can be used as a veneer over
existing measurement platforms, automating the planning
and execution of complex measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet measurement infrastructures; network
measurements; peer-to-peer systems",
}
@Article{Huang:2007:DND,
author = "Yiyi Huang and Nick Feamster and Anukool Lakhina and
Jim (Jun) Xu",
title = "Diagnosing network disruptions with network-wide
analysis",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "61--72",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254890",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To maintain high availability in the face of changing
network conditions, network operators must quickly
detect, identify, and react to events that cause
network disruptions. One way to accomplish this goal is
to monitor routing dynamics, by analyzing routing
update streams collected from routers. Existing
monitoring approaches typically treat streams of
routing updates from different routers as independent
signals, and report only the `loud' events (i.e.,
events that involve a large volume of routing messages).
In this paper, we examine BGP routing data from all
routers in the Abilene backbone for six months and
correlate them with a catalog of all known disruptions
to its nodes and links. We find that many important
events are not loud enough to be detected from a single
stream. Instead, they become detectable only when
multiple BGP update streams are simultaneously
examined. This is because routing updates exhibit {\em
network-wide\/} dependencies.\par
This paper proposes using network-wide analysis of
routing information to diagnose (i.e., detect and
identify) network disruptions. To detect network
disruptions, we apply a multivariate analysis technique
on dynamic routing information (i.e., update traffic
from all the Abilene routers) and find that this
technique can detect every reported disruption to nodes
and links within the network with a low rate of false
alarms. To identify the type of disruption, we jointly
analyze both the network-wide static configuration and
details in the dynamic routing updates; we find that
our method can correctly explain the scenario that
caused the disruption. Although much work remains to
make network-wide analysis of routing data
operationally practical, our results illustrate the
importance and potential of such an approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "anomaly detection; network management; statistical
inference",
}
@Article{Pucha:2007:UND,
author = "Himabindu Pucha and Ying Zhang and Z. Morley Mao and
Y. Charlie Hu",
title = "Understanding network delay changes caused by routing
events",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "73--84",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254891",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network delays and delay variations are two of the
most important network performance metrics directly
impacting real-time applications such as voice over IP
and time-critical financial transactions. This
importance is illustrated by past work on understanding
the delay constancy of Internet paths and recent work
on predicting network delays using virtual coordinate
systems. Merely understanding currently observed delays
is insufficient, as network performance can degrade not
only due to traffic variability but also as a result of
routing changes. Unfortunately this latter effect so
far has been ignored in understanding and predicting
delay-related performance metrics of Internet paths.
Our work is the first to address this shortcoming by
systematically analyzing changes in network delays and
jitter of a diverse and comprehensive set of Internet
paths. Using empirical measurements, we illustrate that
routing changes can result in roundtrip delay increase
of converged paths by more than 1 second. Surprisingly,
intradomain routing changes can also cause such large
delay increase.\par
Given these observations, we develop a framework to
analyze in detail the impact of routing changes on
network delays between end-hosts. Using topology
information and properties associated with routing
changes, we explain the causes for observed delay
fluctuations and more importantly identify routing
changes that lead to predictable effects on
delay-related metrics. Using our framework, we study
the predictability of delay and jitter changes in
response to both passively observed interdomain and
actively measured intradomain routing changes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network delay changes; network jitter changes; routing
dynamics; routing events",
}
@Article{Kashyap:2007:TPR,
author = "Abhishek Kashyap and Sudipta Sengupta and Randeep
Bhatia and M. Kodialam",
title = "Two-phase routing, scheduling and power control for
wireless mesh networks with variable traffic",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "85--96",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254893",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of joint routing, scheduling
and transmission power assignment in multi-hop wireless
mesh networks with unknown traffic. We assume the
traffic is unknown, but the traffic matrix, which
specifies the traffic load between every
source-destination pair in the network, always lies
inside a polytope defined by {\em hose\/} model
constraints. The objective is to minimize the maximum
of the total transmission power in the network over all
traffic matrices in a given polytope. We propose
efficient algorithms that compute a two-phase routing,
schedule and power assignment, and prove the solution
to be 3-approximation with respect to an optimal
two-phase routing, scheduling and power assignment. We
show via extensive simulations that the proposed
algorithm has good performance at its worst operating
traffic compared to an algorithm optimized for that
traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "power control; scheduling; two-phase routing; variable
traffic; wireless mesh networks",
}
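
For context on ``two-phase routing'' in the title above: in the usual
Valiant-style formulation that this line of work builds on, each node forwards
a fraction $\alpha_j$ of all its traffic to intermediate node $j$ in phase
one, and phase two delivers it to the destination. Under hose-model
constraints with egress bound $R_i$ and ingress bound $C_i$ at node $i$, the
load between any node pair is then independent of the actual traffic matrix.
This is the standard identity, stated for reference rather than taken from the
paper:

    d_{ij} \;=\; \alpha_j R_i + \alpha_i C_j,
    \qquad \sum_j \alpha_j = 1, \qquad \alpha_j \ge 0 .
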
@Article{Mirza:2007:MLA,
author = "Mariyam Mirza and Joel Sommers and Paul Barford and
Xiaojin Zhu",
title = "A machine learning approach to {TCP} throughput
prediction",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "97--108",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TCP {\em throughput prediction\/} is an important
capability in wide area overlay and multi-homed
networks where multiple paths may exist between data
sources and receivers. In this paper we describe a new,
lightweight method for TCP throughput prediction that
can generate accurate forecasts for a broad range of
file sizes and path conditions. Our method is based on
Support Vector Regression modeling that uses a
combination of prior file transfers and measurements of
simple path properties. We calibrate and evaluate the
capabilities of our throughput predictor in an
extensive set of lab-based experiments where ground
truth can be established for path properties using
highly accurate passive measurements. We report the
performance for our method in the ideal case of using
our passive path property measurements over a range of
test configurations. Our results show that for bulk
transfers in heavy traffic, TCP throughput is predicted
within 10\% of the actual value 87\% of the time,
representing nearly a 3-fold improvement in accuracy
over prior history-based methods. In the same lab
environment, we assess our method using less accurate
active probe measurements of path properties, and show
that predictions can be made within 10\% of the actual
value nearly 50\% of the time over a range of file
sizes and traffic conditions. This result represents
approximately a 60\% improvement over history-based
methods with a much lower impact on end-to-end paths.
Finally, we implement our predictor in a tool called
{\em PathPerf\/} and test it in experiments conducted
on wide area paths. The results demonstrate that {\em
PathPerf\/} predicts TCP throughput accurately over a
variety of paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active measurements; machine learning; support vector
regression; TCP throughput prediction",
}
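
As a rough illustration of the abstract's approach (Support Vector Regression
over simple path properties and prior transfers), the sketch below trains an
SVR on synthetic path measurements. The feature set, synthetic data, and
scikit-learn usage are assumptions for illustration; this is not the authors'
PathPerf tool.

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

rng = np.random.default_rng(1)
n = 400
# Hypothetical path properties: available bandwidth (Mbps), loss rate,
# queuing delay (ms), and transfer size (MB).
X = np.column_stack([
    rng.uniform(1, 100, n),
    rng.uniform(0, 0.02, n),
    rng.uniform(1, 50, n),
    rng.uniform(0.1, 64, n),
])
# Synthetic "measured throughput", only to exercise the regressor.
y = 0.7 * X[:, 0] / (1 + 50 * X[:, 1]) + rng.normal(0, 2, n)

model = make_pipeline(StandardScaler(), SVR(kernel="rbf", C=10.0, epsilon=0.5))
model.fit(X[:300], y[:300])
pred = model.predict(X[300:])
print("median absolute error:", np.median(np.abs(pred - y[300:])))
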
@Article{Ringberg:2007:SPT,
author = "Haakon Ringberg and Augustin Soule and Jennifer
Rexford and Christophe Diot",
title = "Sensitivity of {PCA} for traffic anomaly detection",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "109--120",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254895",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Detecting anomalous traffic is a crucial part of
managing IP networks. In recent years, network-wide
anomaly detection based on Principal Component Analysis
(PCA) has emerged as a powerful method for detecting a
wide variety of anomalies. We show that tuning PCA to
operate effectively in practice is difficult and
requires more robust techniques than have been
presented thus far. We analyze a week of network-wide
traffic measurements from two IP backbones (Abilene and
Geant) across three different traffic aggregations
(ingress routers, OD flows, and input links), and
conduct a detailed inspection of the feature time
series for each suspected anomaly. Our study identifies
and evaluates four main challenges of using PCA to
detect traffic anomalies: (i) the false positive rate
is very sensitive to small differences in the number of
principal components in the normal subspace, (ii) the
effectiveness of PCA is sensitive to the level of
aggregation of the traffic measurements, (iii) a large
anomaly may inadvertently pollute the normal subspace,
and (iv) correctly identifying which flow triggered the
anomaly detector is an inherently challenging
problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network traffic analysis; principal component
analysis; traffic engineering",
}
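
For reference, the PCA subspace method that the abstract evaluates is usually
described as follows: model the ``normal'' traffic with the top-$k$ principal
components and flag time bins whose residual (squared prediction error) is
large; the choice of $k$ is exactly the sensitivity the paper highlights. The
sketch below is a generic illustration with synthetic data, not the authors'
code, and it sidesteps the abstract's subspace-pollution problem by fitting
the subspace on anomaly-free training data.

import numpy as np

def fit_normal_subspace(X_train, k):
    """Return the mean and top-k principal directions of the
    (time bins x links) training traffic matrix."""
    mean = X_train.mean(axis=0)
    _, _, Vt = np.linalg.svd(X_train - mean, full_matrices=False)
    return mean, Vt[:k].T                      # P has shape (links, k)

def residual_energy(X, mean, P):
    """Squared prediction error per time bin after removing the part of
    the traffic explained by the normal subspace."""
    Xc = X - mean
    residual = Xc - Xc @ P @ P.T
    return np.sum(residual ** 2, axis=1)

rng = np.random.default_rng(0)
train = rng.normal(size=(500, 40))             # anomaly-free "normal" traffic
test = rng.normal(size=(100, 40))
test[42, :5] += 10.0                           # inject an anomaly on five links
mean, P = fit_normal_subspace(train, k=5)      # k is the sensitive knob
spe = residual_energy(test, mean, P)
threshold = spe.mean() + 3 * spe.std()         # crude threshold, illustration only
print(np.where(spe > threshold)[0])            # should single out bin 42
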
@Article{Lee:2007:BCS,
author = "Seungjoon Lee and Dave Levin and Vijay Gopalakrishnan
and Bobby Bhattacharjee",
title = "Backbone construction in selfish wireless networks",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "121--132",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254896",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a protocol to construct routing backbones
in wireless networks composed of selfish participants.
Backbones are inherently cooperative, so constructing
them in selfish environments is particularly difficult;
participants want a backbone to exist (so others relay
their packets) but do not want to join the backbone (so
they do not have to relay packets for others).\par
We model the wireless backbone as a public good and use
impatience as an incentive for cooperation. To
determine if and when to donate to this public good,
each participant calculates how patient it should be in
obtaining the public good. We quantify patience using
the Volunteer's Timing Dilemma (VTD), which we extend
to general multihop network settings. Using our
generalized VTD analysis, each node individually
computes as its dominant strategy the amount of time to
wait before joining the backbone. We evaluate our
protocol using both simulations and an implementation.
Our results show that, even though participants in our
system deliberately wait before volunteering, a
backbone is formed quickly. Further, the quality of the
backbone (such as the size and resulting network
lifetime) is comparable to that of existing backbone
protocols that assume altruistic behavior.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "incentives; public good; selfish network; volunteer's
dilemma; wireless backbone",
}
@Article{Xia:2007:SFJ,
author = "Cathy H. Xia and Zhen Liu and Don Towsley and Marc
Lelarge",
title = "Scalability of fork\slash join queueing networks with
blocking",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "133--144",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates how the throughput of a
general fork-join queueing network with blocking
behaves as the number of nodes increases to infinity
while the processing speed and buffer space of each
node stay unchanged. The problem is motivated by
applications arising from distributed systems and
computer networks. One example is large-scale
distributed stream processing systems where TCP is used
as the transport protocol for data transfer in between
processing components. Other examples include reliable
multicast in overlay networks, and reliable data
transfer in ad hoc networks. Using an analytical
approach, the paper establishes bounds on the
asymptotic throughput of such a network. For a subclass
of networks which are balanced, we obtain sufficient
conditions under which the network stays scalable in
the sense that the throughput is lower bounded by a
positive constant as the network size increases.
Necessary conditions of throughput scalability are
derived for general networks. The special class of
series-parallel networks is then studied in greater
detail, where the asymptotic behavior of the throughput
is characterized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic analysis; blocking; fork and join; queueing
networks; scalability; throughput",
}
@Article{Osogami:2007:OSC,
author = "Takayuki Osogami and Sei Kato",
title = "Optimizing system configurations quickly by guessing
at the performance",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "145--156",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254899",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a Web system can be greatly
improved by tuning its configuration parameters.
However, finding the optimal configuration has been a
time-consuming task due to the long measurement time
needed to evaluate the performance of a given
configuration. We propose an algorithm, which we refer
to as Quick Optimization via Guessing (QOG), that
quickly selects one of nearly best configurations with
high probability. The key ideas in QOG are (i) the
measurement of a configuration is terminated as soon as
the configuration is found to be suboptimal, and (ii)
the performance of a configuration is guessed at based
on measurements of similar configurations, so that the
better configurations are more likely to be measured
before the others. If the performance of a good
configuration has been measured, a poor configuration
will be quickly found to be suboptimal with short
measurement time. We apply QOG to optimizing the
configuration of a real Web system, and find that QOG
can drastically reduce the total measurement time
needed to select the best configuration. Our
experiments also illuminate several interesting
properties of QOG specifically when it is applied to
optimizing Web systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "configuration parameters; performance optimization;
ranking and selection; regression; web system",
}
@Article{Wang:2007:SSR,
author = "Zhe Wang and Wei Dong and William Josephson and Qin Lv
and Moses Charikar and Kai Li",
title = "Sizing sketches: a rank-based analysis for similarity
search",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "157--168",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sketches are compact data structures that can be used
to estimate properties of the original data in building
large-scale search engines and data analysis systems.
Recent theoretical and experimental studies have shown
that sketches constructed from feature vectors using
randomized projections can effectively approximate L1
distance on the feature vectors with the Hamming
distance on their sketches. Furthermore, such sketches
can achieve good filtering accuracy while reducing the
metadata space requirement and speeding up similarity
searches by an order of magnitude. However, it is not
clear how to choose the size of the sketches since it
depends on data type, dataset size, and desired
filtering quality. In real systems designs, it is
necessary to understand how to choose sketch size
without the dataset, or at least without the whole
dataset.\par
This paper presents an analytical model and
experimental results to help system designers make such
design decisions. We present a rank-based filtering
model that describes the relationship between sketch
size and dataset size based on the dataset distance
distribution. Our experimental results with several
datasets including images, audio, and 3D shapes show
that the model yields good, conservative predictions.
We show that the parameters of the model can be set
with a small sample data set and the resulting model
can make good predictions for a large dataset. We
illustrate how to apply the approach with a concrete
example.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "feature-rich data; similarity search; sketch",
}
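
One standard way to obtain bit sketches whose Hamming distance tracks L1
distance, in the spirit of the abstract above, is to let each bit threshold a
randomly chosen coordinate at a random level; the expected Hamming distance is
then proportional to the L1 distance. The construction and parameters below
are a generic illustration and not necessarily the authors' exact sketch.

import numpy as np

def make_sketcher(dim, n_bits, value_range=1.0, seed=0):
    """Each sketch bit thresholds one randomly chosen coordinate at a
    random level in [0, value_range]."""
    rng = np.random.default_rng(seed)
    dims = rng.integers(0, dim, size=n_bits)
    thresholds = rng.uniform(0.0, value_range, size=n_bits)
    return lambda v: np.asarray(v)[dims] > thresholds

rng = np.random.default_rng(1)
u, v = rng.random(64), rng.random(64)          # feature vectors in [0, 1)
sketch = make_sketcher(dim=64, n_bits=512)
hamming = np.count_nonzero(sketch(u) != sketch(v))
# Expected Hamming distance is n_bits * L1(u, v) / (dim * value_range).
print(hamming, 512 * np.abs(u - v).sum() / 64)
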
@Article{Park:2007:MEP,
author = "Soyeon Park and Weihang Jiang and Yuanyuan Zhou and
Sarita Adve",
title = "Managing energy-performance tradeoffs for
multithreaded applications on multiprocessor
architectures",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "169--180",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In modern computers, non-performance metrics such as
energy consumption have become increasingly important,
requiring tradeoff with performance. A recent work has
proposed performance-guaranteed energy management, but
it is designed specifically for sequential applications
and cannot be applied to a large class of multithreaded
applications running on high end computers and data
servers.\par
To address the above problem, this paper makes the
first attempt to provide performance-guaranteed energy
management for multithreaded applications on
multiprocessor architectures. We first conduct a
comprehensive study on the effects of energy adaptation
on thread synchronizations and show that a
multithreaded application suffers from not only local
slowdowns due to energy adaptation, but also
significant slowdowns propagated from other threads
because of synchronization. Based on these findings, we
design three Synchronization-Aware (SA) algorithms, LWT
(Lock Waiting Time-based), CSL (Critical Section
Length-based), and ODP (Operation Delay
Propagation-based), to estimate the energy
adaptation-induced slowdowns on each thread. The local
slowdowns are then combined across multiple threads via
three aggregation methods (MAX, AVG and SUM) to
estimate the overall application slowdown.\par
We evaluate our methods using a large multithreaded
commercial application, IBM DB2 with
industrial-strength online transaction processing
(OLTP) workloads, and six SPLASH parallel scientific
applications. Our experimental results show that LWT
combined with the MAX aggregation method not only
controls the performance slowdown within the specified
limits but also conserves the most energy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy and performance tradeoffs; low power design;
memory energy management; multithreaded applications",
}
@Article{Cvetkovski:2007:AAC,
author = "Andrej Cvetkovski",
title = "An algorithm for approximate counting using limited
memory resources",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "181--190",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a randomized algorithm for
approximate counting that preserves the same modest
memory requirements of $ \log (\log n)$ bits per counter as
the approximate counting algorithm introduced in the
seminal paper of R. Morris (1978), and in addition, is
characterized by (i) lower expected number of memory
accesses and (ii) lower standard error on more than 99
percent of its counting range. An exact analysis of the
relevant statistical properties of the algorithm is
carried out. Performance evaluation via simulations is
also provided to validate the presented
theory.\par
Given its properties, the presented algorithm is
suitable as a basic building block of data streaming
applications having a large number of simultaneous
counters and/or operating at very high speeds. As such,
it is applicable to a wide range of measurement and
monitoring operations, including performance monitoring
of communication hardware, measurements for
optimization in large database systems, and gathering
statistics for data compression.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate counting; data streaming; network
monitoring",
}
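
For context, the classic Morris (1978) counter that the abstract takes as its
baseline (not the paper's improved variant) stores only an exponent C,
increments it with probability 2^{-C}, and estimates the count as 2^C - 1,
which is unbiased although any single run can be off by a constant factor.

import random

class MorrisCounter:
    """Classic Morris approximate counter: stores only an exponent c
    (about log2(log2(n)) bits) and estimates the count as 2**c - 1."""
    def __init__(self):
        self.c = 0
    def increment(self):
        if random.random() < 2.0 ** -self.c:   # succeed with probability 2^-c
            self.c += 1
    def estimate(self):
        return 2 ** self.c - 1

counter = MorrisCounter()
for _ in range(100_000):
    counter.increment()
print(counter.c, counter.estimate())           # unbiased, but high variance
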
@Article{Lee:2007:SDN,
author = "Eric S. Lee and Thom Whalen",
title = "Synthetic designs: a new form of true experimental
design for use in information systems development",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "191--202",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer scientists and software engineers seldom rely
on using experimental methods despite frequent calls to
do so. The problem may lie with the shortcomings of
traditional experimental methods. We introduce a new
form of experimental designs, synthetic designs, which
address these shortcomings. Compared with classical
experimental designs (between-subjects,
within-subjects, and matched-subjects), synthetic
designs can offer substantial reductions in sample
sizes, cost, time and effort expended, increased
statistical power, and fewer threats to validity
(internal, external, and statistical conclusion). This
new design is a variation of within-subjects design in
which each system user serves in only a single
treatment condition. System performance scores for all
other treatment conditions are derived synthetically
without repeated testing of each subject. This design,
though not applicable in all situations, can be used in
the development and testing of some computer systems
provided that user behavior is unaffected by the
version of computer system being used. We justify
synthetic designs on three grounds: this design has
been used successfully in the development of
computerized mug shot systems, showing marked
advantages over traditional designs; a detailed
comparison with traditional designs showing their
advantages on 17 of the 18 criteria considered; and an
assessment showing these designs satisfy all the
requirements of true experiments (albeit in a novel
way).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "experimental designs; synthetic experimental designs",
}
@Article{Feng:2007:PUP,
author = "Hanhua Feng and Vishal Misra and Dan Rubenstein",
title = "{PBS}: a unified priority-based scheduler",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "203--214",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Blind scheduling policies schedule tasks without
knowledge of the tasks' remaining processing times.
Existing blind policies, such as FCFS, PS, and LAS,
have proven useful in network and operating system
applications, but each policy has a separate, vastly
differing description, leading to separate and distinct
implementations. This paper presents the design and
implementation of a configurable blind scheduler that
contains a continuous, tunable parameter. By merely
changing the value of this parameter, the scheduler's
policy exactly emulates or closely approximates several
existing standard policies. Other settings enable
policies whose behavior is a hybrid of these standards.
We demonstrate the practical benefits of such a {\em
configurable\/} scheduler by implementing it in the
Linux operating system. We show that we can emulate the
behavior of Linux's existing, more complex scheduler
with a single (hybrid) setting of the parameter. We
also show, using synthetic workloads, that the best
value for the tunable parameter is not unique, but
depends on the distribution of the sizes of tasks arriving
to the system. Finally, we use our formulation of the
configurable scheduler to contrast the behavior of
various blind schedulers by exploring how various
properties of the scheduler change as we vary our
scheduler's tunable parameter.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "FCFS; LAS; Linux; PBS; queueing systems; scheduling",
}
@Article{Jelenkovic:2007:ASC,
author = "Predrag R. Jelenkovic and Xiaozhu Kang and Jian Tan",
title = "Adaptive and scalable comparison scheduling",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "215--226",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Shortest Remaining Processing Time (SRPT)
scheduling discipline is optimal and its superior
performance, compared with the policies that do not use
the knowledge of job sizes, can be quantified using
mean-value analysis as well as our new asymptotic
distributional limits for the relatively smaller
heavy-tailed jobs. However, the main difficulty in
implementing SRPT in large practical systems, e.g., Web
servers, is that its complexity grows with the number
of jobs in the queue. Hence, in order to lower the
complexity, it is natural to approximate SRPT by
grouping the arrivals into a fixed (small) number of
classes containing jobs of approximately equal size and
then serve the classes of smaller jobs with higher
priorities.\par
In this paper, we design a novel adaptive grouping
mechanism based on relative size comparison of a newly
arriving job to the preceding $m$ arrivals.
Specifically, if the newly arriving job is smaller than
$k$ and larger than $ m - k$ of the previous $m$ jobs,
it is routed into class $k$. The excellent performance
of this mechanism, even for a small number of classes $
m + 1$, is demonstrated using both the asymptotic
queueing analysis under heavy tails and extensive
simulations. We also discuss refinements of the
comparison grouping mechanism that improve the accuracy
of job classification at the expense of a small
additional complexity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adaptive thresholds; comparison scheduling; M/G/1;
scalability",
}
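
The grouping rule quoted in the abstract above is concrete enough to sketch
directly: a newly arriving job is compared with the previous m arrivals and
routed to class k if it is smaller than exactly k of them (so larger k means a
smaller job and a higher-priority class). The sketch below shows only this
classification step, not the priority scheduler itself; m and the job sizes
are illustrative.

from collections import deque

class ComparisonClassifier:
    """Route a new job to class k if it is smaller than exactly k of the
    previous m arrivals; larger k = smaller job = higher priority class."""
    def __init__(self, m):
        self.recent = deque(maxlen=m)          # sizes of the last m arrivals
    def classify(self, size):
        k = sum(1 for s in self.recent if size < s)
        self.recent.append(size)
        return k                               # class index in 0..m

clf = ComparisonClassifier(m=4)
for size in [100, 5, 50, 500, 8, 300]:
    print(size, "-> class", clf.classify(size))
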
@Article{Bhadra:2007:OCP,
author = "Sandeep Bhadra and Yingdong Lu and Mark S.
Squillante",
title = "Optimal capacity planning in stochastic loss networks
with time-varying workloads",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "227--238",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254909",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a capacity planning optimization problem
in a general theoretical framework that extends the
classical Erlang loss model and related stochastic loss
networks to support time-varying workloads. The time
horizon consists of a sequence of coarse time
intervals, each of which involves a stochastic loss
network under a fixed multi-class workload that can
change in a general manner from one interval to the
next. The optimization problem consists of determining
the capacities for each time interval that maximize a
utility function over the entire time horizon, finite
or infinite, where rewards gained from servicing
customers are offset by penalties associated with
deploying capacities in an interval and with changing
capacities among intervals. We derive a state-dependent
optimal policy within the context of a particular
limiting regime of the optimization problem, and we
prove this solution to be asymptotically optimal. Then,
under fairly mild conditions, we prove that a similar
structural property holds for the optimal solution of
the original stochastic optimization problem, and we
show how the optimal capacities comprising this
solution can be efficiently computed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic optimality; capacity planning; Erlang
fixed-point approximation; Erlang loss formula;
stochastic dynamic programming; stochastic loss
networks; time-varying workloads",
}
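
For background, the classical Erlang loss model that the abstract's framework
generalizes has a closed-form blocking probability; the standard numerically
stable recursion for the Erlang B formula is sketched below for a single link
with fixed load (the paper's time-varying, network-wide optimization is not
reproduced here).

def erlang_b(capacity, load):
    """Blocking probability of an Erlang loss link with `capacity` circuits
    and offered load `load` Erlangs, via the standard recursion
    B(0) = 1, B(c) = a*B(c-1) / (c + a*B(c-1))."""
    b = 1.0
    for c in range(1, capacity + 1):
        b = load * b / (c + load * b)
    return b

# Example: 10 circuits offered 7 Erlangs of traffic (about 8% blocking).
print(round(erlang_b(10, 7.0), 4))
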
@Article{Liu:2007:FLS,
author = "Jiaping Liu and Alexandre Prouti{\`e}re and Yung Yi
and Mung Chiang and H. Vincent Poor",
title = "Flow-level stability of data networks with non-convex
and time-varying rate regions",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "239--250",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we characterize flow-level stochastic
stability for networks with non-convex or time-varying
rate regions under resource allocation based on utility
maximization. Similar to prior works on flow-level
stability, we consider exogenous data arrivals with
finite workloads. However, to model many realistic
situations, the rate region, which constrains the
feasibility of resource allocation, may be either
non-convex or time-varying. When the rate region is
fixed but non-convex, we derive sufficient and
necessary conditions for stability, which coincide when
the set of allocated rate vectors has continuous
contours. When the rate region is time-varying
according to some stationary, ergodic process, we
derive the precise stability region. In both cases, the
size of the stability region depends on the resource
allocation policy, in particular, on the fairness
parameter in $ \alpha $-fair utility maximization.
This is in sharp contrast with the substantial existing
literature on stability under fixed and convex rate
regions, in which the stability region coincides with
the rate region for many utility-based resource
allocation schemes, independently of the value of the
fairness parameter. We further investigate the tradeoff
between fairness and stability when the rate region is
non-convex or time-varying. Numerical examples of both
wired and wireless networks are provided to illustrate
the new stability regions and tradeoffs proved in the
paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fairness; network utility maximization; resource
allocation; stability",
}
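
For reference on the fairness parameter mentioned above: resource allocation
by utility maximization typically uses the standard $\alpha$-fair utility
family, in which $\alpha = 1$ gives proportional fairness and
$\alpha \to \infty$ approaches max-min fairness. This is the standard family,
stated here for reference rather than quoted from the paper:

    U_\alpha(x) =
    \begin{cases}
      \dfrac{x^{1-\alpha}}{1-\alpha}, & \alpha \ge 0,\ \alpha \ne 1,\\[1ex]
      \log x, & \alpha = 1.
    \end{cases}
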
@Article{Smirni:2007:FDP,
author = "Evgenia Smirni and Frederica Darema and Albert
Greenberg and Adolfy Hoisie and Don Towsley",
title = "Future directions in performance evaluation research",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "251--252",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dong:2007:WSP,
author = "Qunfeng Dong and Suman Banerjee and Jia Wang and
Dheeraj Agrawal",
title = "Wire speed packet classification without {TCAMs}: a
few more registers (and a bit of logic) are enough",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "253--264",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Packet classification is the foundation of many
Internet functions such as QoS and security. A long
thread of research has proposed efficient
software-based solutions to this problem. Such software
solutions are attractive because they require cheap
memory systems for implementation, thus bringing down
the overall cost of the system. In contrast,
hardware-based solutions use more expensive memory
systems, e.g., TCAMs, but are often preferred by router
vendors for their faster classification speeds. The
goal of this paper is to find a `best-of-both-worlds'
solution --- a solution that incurs the cost of a
software-based system and has the speed of a
hardware-based one. Our proposed solution, called {\em
smart rule cache\/} achieves this goal by using minimal
hardware --- a few additional registers --- to cache
{\em evolving\/} rules which preserve classification
semantics, and additional logic to match incoming
packets to these rules. Using real traffic traces and
real rule sets from a tier-1 ISP, we show such a setup
is sufficient to achieve very high hit ratios for fast
classification in hardware. Cache miss ratios are 2--4
orders of magnitude lower than those of flow cache schemes.
Given its low cost and good performance, we believe our
solution may create significant impact on current
industry practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet classification; rule cache; rule evolution",
}
@Article{Hirzel:2007:DLO,
author = "Martin Hirzel",
title = "Data layouts for object-oriented programs",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "265--276",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Object-oriented programs rely heavily on objects and
pointers, making them vulnerable to slowdowns from
cache and TLB misses. The cache and TLB behavior
depends on the data layout of objects in memory. There
are many possible data layouts with different impacts
on performance, but it is not known which perform
better. This paper presents a novel framework for
evaluating data layouts. The framework both makes
implementing many layouts easy, and enables performance
measurements of real programs using a product Java
virtual machine on stock hardware. This is achieved by
sorting objects during copying garbage collection;
outside of garbage collection, program performance is
solely determined by the data layout that the sort key
implements. This paper surveys and evaluates 10 common
data layouts with 32 realistic benchmark programs
running on 3 different hardware configurations. The
results confirm the importance of data layouts for
program performance, and show that almost all layouts
yield the best performance for some programs and the
worst performance for others.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache; data layout; data placement; GC; hardware
performance counters; memory subsystem; spatial
locality; TLB",
}
@Article{Hao:2007:BHA,
author = "Fang Hao and Murali Kodialam and T. V. Lakshman",
title = "Building high accuracy {Bloom} filters using
partitioned hashing",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "277--288",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254916",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The growing importance of operations such as
packet-content inspection, packet classification based
on non-IP headers, maintaining flow-state, etc. has led
to increased interest in the networking applications of
Bloom filters. This is because Bloom filters provide a
relatively easy method for hardware implementation of
set-membership queries. However, the tradeoff is that
Bloom filters only provide a probabilistic test and
membership queries can result in false positives.
Ideally, we would like this false positive probability
to be very low. The main contribution of this paper is
a method for significantly reducing this false positive
probability in comparison to existing schemes. This is
done by developing a {\em partitioned hashing\/} method
which results in a choice of hash functions that set
far fewer bits in the Bloom filter bit vector than
would be the case otherwise. This lower fill factor of
the bit vector translates to a much lower false
positive probability. We show experimentally that this
improved choice can result in as much as a ten-fold
increase in accuracy over standard Bloom filters. We
also show that the scheme performs much better than
other proposed schemes for improving Bloom filters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bloom filter; hashing",
}
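
As background for the abstract above, a baseline (unpartitioned) Bloom filter
is sketched below to make the fill-factor versus false-positive tradeoff
concrete; its false positive rate is approximately (1 - e^{-kn/m})^k for k
hash functions, n inserted keys, and m bits. The paper's partitioned-hashing
choice of hash functions is not reproduced here, and the parameters are
illustrative.

import hashlib

class BloomFilter:
    def __init__(self, m_bits, k_hashes):
        self.m, self.k = m_bits, k_hashes
        self.bits = bytearray((m_bits + 7) // 8)
    def _positions(self, item):
        for i in range(self.k):
            digest = hashlib.sha256(("%d:%s" % (i, item)).encode()).digest()
            yield int.from_bytes(digest[:8], "big") % self.m
    def add(self, item):
        for p in self._positions(item):
            self.bits[p // 8] |= 1 << (p % 8)
    def __contains__(self, item):
        return all((self.bits[p // 8] >> (p % 8)) & 1
                   for p in self._positions(item))

bf = BloomFilter(m_bits=8192, k_hashes=5)
for i in range(1000):
    bf.add("flow-%d" % i)
false_positives = sum(("other-%d" % i) in bf for i in range(10000))
print(false_positives / 10000)      # roughly (1 - e^(-kn/m))^k, about 0.02 here
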
@Article{Bairavasundaram:2007:ALS,
author = "Lakshmi N. Bairavasundaram and Garth R. Goodson and
Shankar Pasupathy and Jiri Schindler",
title = "An analysis of latent sector errors in disk drives",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "289--300",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The reliability measures in today's disk drive-based
storage systems focus predominantly on protecting
against complete disk failures. Previous disk
reliability studies have analyzed empirical data in an
attempt to better understand and predict disk failure
rates. Yet, very little is known about the incidence of
latent sector errors, i.e., errors that go undetected
until the corresponding disk sectors are
accessed.\par
Our study analyzes data collected from production
storage systems over 32 months across 1.53 million
disks (both nearline and enterprise class). We analyze
factors that impact latent sector errors, observe
trends, and explore their implications on the design of
reliability mechanisms in storage systems. To the best
of our knowledge, this is the first study of such large
scale (our sample size is at least an order of magnitude
larger than in previously published studies) and the first
one to focus specifically on latent sector errors and
their implications on the design and reliability of
storage systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk drive reliability; latent sector errors; MTTDL",
}
@Article{Legout:2007:CSI,
author = "Arnaud Legout and Nikitas Liogkas and Eddie Kohler and
Lixia Zhang",
title = "Clustering and sharing incentives in {BitTorrent}
systems",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "301--312",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254919",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Peer-to-peer protocols play an increasingly
instrumental role in Internet content distribution. It
is therefore important to gain a complete understanding
of how these protocols behave in practice and how their
operating parameters affect overall system performance.
This paper presents the first detailed experimental
investigation of the peer selection strategy in the
popular BitTorrent protocol. By observing more than 40
nodes in instrumented private torrents, we validate
three protocol properties that, though believed to
hold, have not been previously demonstrated
experimentally: the clustering of similar-bandwidth
peers, the effectiveness of BitTorrent's sharing
incentives, and the peers' high uplink utilization. In
addition, we observe that BitTorrent's modified choking
algorithm in seed state provides uniform service to all
peers, and that an underprovisioned initial seed leads
to absence of peer clustering and less effective
sharing incentives. Based on our results, we provide
guidelines for seed provisioning by content providers,
and discuss a tracker protocol extension that addresses
an identified limitation of the protocol.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BitTorrent; choking algorithm; clustering; incentives;
seed provisioning",
}
@Article{Sanghavi:2007:DLS,
author = "Sujay Sanghavi and Loc Bui and R. Srikant",
title = "Distributed link scheduling with constant overhead",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "313--324",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254920",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes a new class of simple, distributed
algorithms for scheduling in wireless networks. The
algorithms generate new schedules in a distributed
manner via simple local changes to existing schedules.
The class is parameterized by integers $ k \geq 1$. We
show that algorithm $k$ of our class achieves $ k / (k
+ 2)$ of the capacity region, for every $ k \geq
1$.\par
The algorithms have small and constant worst-case
overheads: in particular, algorithm $k$ generates a new
schedule using (a) time less than $ 4 k + 2$ round-trip
times between neighboring nodes in the network, and (b)
at most three control transmissions by any given node,
for any $k$. The control signals are explicitly
specified, and face the same interference effects as
normal data transmissions. Our class of distributed
wireless scheduling algorithms are the first ones
guaranteed to achieve any fixed fraction of the
capacity region while using small and constant
overheads that do not scale with network size. The
parameter $k$ explicitly captures the tradeoff between
control overhead and scheduler throughput performance
and provides a tuning knob protocol designers can use
to harness this trade-off in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "matchings; primary interference; scheduling; wireless
networks",
}
@Article{Rajendran:2007:TBC,
author = "Raj Kumar Rajendran and Vishal Misra and Dan
Rubenstein",
title = "Theoretical bounds on control-plane self-monitoring in
routing protocols",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "325--336",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254921",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The distributed routing protocols in use today promise
to operate correctly only if all nodes implement the
protocol faithfully. A small, insignificant set of nodes
has, in the past, brought an entire network to a
standstill by reporting incorrect route information.
The damage caused by these erroneous reports, in some
instances, could have been contained since incorrect
route reports sometimes reveal themselves as
inconsistencies in the state-information of correctly
functioning nodes. By checking for such inconsistencies
and taking preventive action, such as disregarding
selected route-reports, a correctly functioning node
could have limited the damage caused by the
malfunctioning nodes.\par
Our theoretical study attempts to understand when a
correctly functioning node can, by analysing its
routing-state, detect that some node is misimplementing
route selection. We present a methodology, called
Strong-Detection that helps answer the question. We
then apply Strong-Detection to three classes of routing
protocols: distance-vector, path-vector, and
link-state. For each class, we derive low-complexity
self-monitoring algorithms that take as input the
routing state and output whether any detectable
anomalies exist. We then use these algorithms to
compare and contrast the self-monitoring power of these
different classes of protocols in relation to the
complexity of the routing-state.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distance vector; misconfiguration; rogue node; routing
protocols",
}
@Article{Yuan:2007:ORF,
author = "Xin Yuan and Wickus Nienaber and Zhenhai Duan and Rami
Melhem",
title = "Oblivious routing for fat-tree based system area
networks with uncertain traffic demands",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "337--348",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254922",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fat-tree based system area networks have been widely
adopted in high performance computing clusters. In such
systems, the routing is often deterministic and the
traffic demand is usually uncertain and changing. In
this paper, we study routing performance on fat-tree
based system area networks with deterministic routing
under the assumption that the traffic demand is
uncertain. The performance of a routing algorithm under
uncertain traffic demands is characterized by the {\em
oblivious performance\/} ratio that bounds the relative
performance of the routing algorithm and the optimal
routing algorithm for any given traffic demand. We
consider both single path routing where the traffic
between each source-destination pair follows one path,
and multi-path routing where multiple paths can be used
for the traffic between a source-destination pair. We
derive lower bounds of the oblivious performance ratio
of any single path routing scheme for fat-tree
topologies and develop single path oblivious routing
schemes that achieve the optimal oblivious performance
ratio for commonly used fat-tree topologies. These
oblivious routing schemes provide the best performance
guarantees among all single path routing algorithms
under uncertain traffic demands. For multi-path
routing, we show that it is possible to obtain a scheme
that is optimal for any traffic demand (an oblivious
performance ratio of 1) on the fat-tree topology. These
results quantitatively demonstrate that single path
routing cannot guarantee high routing performance while
multi-path routing is very effective in balancing
network loads on the fat-tree topology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fat-tree; oblivious routing; system area networks",
}
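
A brief note on the term used above: the oblivious performance ratio of a
routing $f$ is, in the standard formulation, the worst case over traffic
demands of the ratio between the cost (congestion) achieved by $f$ and that of
the demand-specific optimal routing; a ratio of 1, as the abstract obtains for
multi-path routing, means the routing is optimal for every demand. This is the
usual definition, stated here for reference rather than quoted from the paper:

    \mathrm{OPR}(f) \;=\; \sup_{D \in \mathcal{D}}\;
    \frac{\mathrm{cost}(f, D)}{\min_{g}\, \mathrm{cost}(g, D)} .
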
@Article{Nahum:2007:ESS,
author = "Erich M. Nahum and John Tracey and Charles P. Wright",
title = "Evaluating {SIP} server performance",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "349--350",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254924",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "SIP is a protocol of growing importance, with uses for
VoIP, instant messaging, presence, and more. However,
its performance is not well-studied or understood. In
this extended abstract we overview our experimental
evaluation of common SIP server scenarios using
open-source SIP software such as OpenSER and SIP
pruning on Linux.\par
We show performance varies greatly depending on the
server scenario and how the protocol is used. Depending
on the configuration, throughput can vary from
hundreds to thousands of operations per second. For
example, we observe that the choice of stateless vs.
stateful proxying, using TCP rather than UDP, or
including MD5-based authentication can each affect
performance by a factor of 2-4. We also provide kernel
and application profiles using Oprofile that help
explain and illustrate processing costs. Finally, we
provide a simple fix for transaction-stateful proxying
that improves performance by a factor of 10. Full
details can be found in our accompanying technical
report.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "experimental evaluation; performance; server; SIP",
}
@Article{Puzak:2007:PS,
author = "Thomas R. Puzak and Allan Hartstein and Viji
Srinivasan and Philip Emma and Arthur Nadas",
title = "Pipeline spectroscopy",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "351--352",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254925",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache; convex combination; cost of a miss; probability
transition matrix",
}
@Article{Cohen:2007:BSB,
author = "Edith Cohen and Haim Kaplan",
title = "Bottom-$k$ sketches: better and more efficient
estimation of aggregates",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "353--354",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254926",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A {\em Bottom-$k$ sketch\/} is a summary of a set of
items with nonnegative weights. Each such summary
allows us to compute approximate aggregates over the
set of items. Bottom-$k$ sketches are obtained by
associating with each item in a ground set an
independent random rank drawn from a probability
distribution that depends on the weight of the item.
For each subset of interest, the bottom-$k$ sketch is
the set of the $k$ minimum ranked items and their
ranks. Bottom-$k$ sketches have numerous applications.
We develop and analyze data structures and estimators
for bottom-$k$ sketches to facilitate their deployment.
We develop novel estimators and algorithms which show
that bottom-$k$ sketches are a superior alternative to
other sketching methods, both in the efficiency of
obtaining the sketches and in the accuracy of the
estimates derived from them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate query processing; bottom-k; sampling;
sketches",
}
@Article{Gu:2007:GEM,
author = "Yu Gu and Lee Breslau and Nick G. Duffield and
Subhabrata Sen",
title = "{GRE} encapsulated multicast probing: a scalable
technique for measuring one-way loss",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "355--356",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254927",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop techniques for estimating one-way loss from
a measurement host to network routers which exploit
commonly implemented features on commercial routers and
do not require any new router capabilities. The work
addresses the problem of scalably performing one-way
loss measurements across specific network paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "measurement; monitoring; multicast; one-way loss;
performance",
}
@Article{Mirkovic:2007:WSR,
author = "Jelena Mirkovic and Alefiya Hussain and Brett Willson
and Sonia Fahmy and Wei-Min Yao and Peter Reiher and
Stephen Schwab and Roshan Thomas",
title = "When is service really denied?: a user-centric {DoS}
metric",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "357--358",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254928",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Denial-of-service (DoS) research community lacks
accurate metrics to evaluate an attack's impact on
network services, its severity and the effectiveness of
a potential defense. We propose several DoS impact
metrics that measure the quality of service experienced
by end users during an attack, and compare these
measurements to application-specific thresholds. Our
metrics are ideal for testbed experimentation, since
necessary traffic parameters are extracted from packet
traces gathered during an experiment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "denial of service; measurement; metrics",
}
@Article{Guo:2007:DIM,
author = "Lei Guo and Enhua Tan and Songqing Chen and Zhen Xiao
and Xiaodong Zhang",
title = "Does {Internet} media traffic really follow
{Zipf}-like distribution?",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "359--360",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254929",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is commonly agreed that Web traffic follows the
Zipf-like distribution, which is an analytical
foundation for improving Web access performance by
client-server based proxy caching systems on the
Internet. However, some recent studies have observed
non-Zipf-like distributions of Internet media traffic
in different content delivery systems. Due to the
variety of media delivery systems and the diversity of
media content, existing studies on media traffic are
largely workload specific, and the observed access
patterns are often different from or even conflict with
each other. For Web media systems, study [3] reports
that the access pattern of streaming media is Zipf-like
in a university campus network, while study [2] finds
that it is not Zipf-like in an enterprise media
server.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "media; stretched exponential; Zipf-like",
}
@Article{Hoflehner:2007:CCS,
author = "Gerolf F. Hoflehner and Darshan Desai and Daniel M.
Lavery and Alexandru Nicolau and Alexander V.
Veidenbaum",
title = "Comparative characterization of {SPEC CPU2000} and
{CPU2006} on {Itanium}{\reg} architecture",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "361--362",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254930",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently SPEC1 released the next generation of its CPU
benchmark, widely used by compiler writers and
architects for measuring processor performance. This
calls for characterization of the applications in SPEC
CPU2006 to guide the design of future microprocessors.
In addition, it necessitates assessing the change in
the characteristics of the applications from one suite
to another. Although similar studies using the retired
SPEC CPU benchmark suites have been done in the past,
to the best of our knowledge, a thorough
characterization of CPU2006 and its comparison with
CPU2000 has not been done so far. In this paper, we
present the above; specifically, we analyze IPC
(instructions per cycle), L1 and L2 data cache misses,
and branch prediction, especially in CPU2006.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "branch prediction; caches; performance evaluation;
SPEC CPU benchmarks",
}
@Article{Lin:2007:PRT,
author = "Bin Lin and Arindam Mallik and Peter A. Dinda and
Gokhan Memik and Robert P. Dick",
title = "Power reduction through measurement and modeling of
users and {CPUs}: summary",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "363--364",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254931",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dynamic voltage and frequency scaling (DVFS);
process-driven voltage scaling (PDVS); user-driven
frequency scaling (UDFS)",
}
@Article{Wang:2007:GRI,
author = "Chong Wang and John W. Byers",
title = "Generating representative {ISP} topologies from
first-principles",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "365--366",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254932",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Understanding and modeling the factors that underlie
the growth and evolution of network topologies are
basic questions that impinge upon capacity planning,
forecasting, and protocol research. Early topology
generation work focused on generating network-wide
connectivity maps, either at the AS-level or the
router-level, typically with an eye towards reproducing
abstract properties of observed topologies. But
recently, advocates of an alternative
`first-principles' approach question the feasibility of
realizing representative topologies with simple
generative models that do not explicitly incorporate
real-world constraints, such as the relative costs of
router configurations, into the model. Our work
synthesizes these two lines by designing a topology
generation mechanism that incorporates first-principles
constraints. Our goal is more modest than that of
constructing an Internet-wide topology: we aim to
generate representative topologies for single ISPs.
However, our methods also go well beyond previous work,
as we annotate these topologies with representative
capacity and latency information. Taking only demand
for network services over a given region as input, we
propose a natural cost model for building and
interconnecting PoPs and formulate the resulting
optimization problem faced by an ISP. We devise
hill-climbing heuristics for this problem and
demonstrate that the solutions we obtain are
quantitatively similar to those in measured
router-level ISP topologies, with respect to both
topological properties and fault-tolerance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network design; network topology modeling;
optimization",
}
@Article{Bissias:2007:BDL,
author = "George Dean Bissias and Brian Neil Levine and Arnold
Rosenberg",
title = "Bounding damage from link destruction, with
application to the {Internet}",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "367--368",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254933",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "graph partitioning; spectral graph theory;
vulnerability",
}
@Article{Erman:2007:SSN,
author = "Jeffrey Erman and Anirban Mahanti and Martin Arlitt
and Ira Cohen and Carey Williamson",
title = "Semi-supervised network traffic classification",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "369--370",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254934",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "semi-supervised learning; traffic classification",
}
@Article{Mi:2007:EMI,
author = "Ningfang Mi and Alma Riska and Qi Zhang and Evgenia
Smirni and Erik Riedel",
title = "Efficient management of idleness in systems",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "371--372",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254935",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "foreground/background scheduling; storage systems",
}
@Article{deJager:2007:AIS,
author = "Douglas V. de Jager and Jeremy T. Bradley",
title = "Asynchronous iterative solution for state-based
performance metrics",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "373--374",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254936",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Solution of large sparse fixed-point problems, Mline
over x = over x and Mline over x + line over b = over
x, may be seen as underpinning many important
performance-analysis calculations. These calculations
include steady-state, passage-time and transient-time
calculations in discrete-time Markov chains,
continuous-time Markov chains and semi-Markov chains.
In recent years, much work has been done to extend the
application of asynchronous iterative fixed-point
solution methods to many different contexts. This work
has been motivated by the potential for faster
solution, more efficient use of the communication
channel and/or access to memory, and simplification of
task management and programming. In this paper, we
present theoretical developments which allow us to
extend the application of asynchronous iterative
solution methods to solve for the key performance
metrics mentioned above, such that we may employ the
full breadth of Chazan and Miranker's classes of
asynchronous iterations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asynchronous iterations; dominant eigenvectors;
matrix-vector splitting; performance analysis;
Perron--Frobenius",
}
@Article{Hoste:2007:ACP,
author = "Kenneth Hoste and Lieven Eeckhout and Hendrik
Blockeel",
title = "Analyzing commercial processor performance numbers for
predicting performance of applications of interest",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "375--376",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254937",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current practice in benchmarking commercial computer
systems is to run a number of industry-standard
benchmarks and to report performance numbers. The huge
amount of machines and the large number of benchmarks
for which performance numbers are published make it
hard to observe clear performance trends though. In
addition, these performance numbers for specific
benchmarks do not provide insight into how applications
of interest that are not part of the benchmark suite
would perform on those machines.\par
In this work we build a methodology for analyzing
published commercial machine performance data sets. We
apply statistical data analysis techniques, in particular
principal components analysis and cluster analysis, to
reduce the information to a manageable amount and
facilitate its understanding.
Visualizing SPEC CPU2000 performance numbers for 26
benchmarks and 1000+ machines in just a few graphs
gives insight into how commercial machines compare
against each other.\par
In addition, we provide a way of relating inherent
program behavior to these performance numbers so that
insights can be gained into how the observed
performance trends relate to the behavioral
characteristics of computer programs. This results in a
methodology for the ubiquitous benchmarking problem of
predicting performance of an application of interest
based on its similarities with the benchmarks in a
published industry-standard benchmark suite.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmark similarity; performance analysis;
performance prediction",
}
@Article{He:2007:BSS,
author = "Jiayue He and Augustin Chaintreau",
title = "{BRADO}: scalable streaming through reconfigurable
trees",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "377--378",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254938",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "application layer multicast; network overlays; TCP
tandem",
}
@Article{Nurmi:2007:QQB,
author = "Daniel Charles Nurmi and John Brevik and Rich Wolski",
title = "{QBETS}: queue bounds estimation from time series",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "379--380",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254939",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "batch scheduling; queue prediction; super-computing",
}
@Article{Deng:2007:PDS,
author = "Leiwen Deng and Aleksandar Kuzmanovic",
title = "{Pong}: diagnosing spatio-temporal {Internet}
congestion properties",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "381--382",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254940",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ability to accurately detect congestion events in
the Internet and reveal their spatial (i.e., where they
happen) and temporal (i.e., how frequently they occur
and how long they last) properties would significantly
improve our understanding of how the Internet operates.
In this paper we present {\em Pong}, a novel
measurement tool capable of effectively diagnosing
congestion events over short (e.g., $ \approx $100ms or
longer) time-scales, and simultaneously locating
congested points within a single hop on an end-to-end
path at the granularity of a single link.\par {\em
Pong\/} (i) uses queuing delay as indicative of
congestion, and (ii) strategically combines end-to-end
probes with those targeted to intermediate nodes.
Moreover, it (iii) achieves high sampling frequency by
sending probes to all intermediate nodes, including
uncongested ones, (iv) dramatically improves spatial
detection granularity (i.e., from path segments to
individual links), by using short-term congestion
history, (v) considerably enhances the measurement
quality by adjusting the probing methodology (e.g.,
send 4-, 3-, or 2-packet probes) based on the observed
path topology, and (vi) deterministically detects
moments of its own inaccuracy. We conduct a large-scale
measurement study on over 23,000 Internet paths and
present their spatial-temporal properties as inferred
by {\em Pong}.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "coordinated probing; Pong",
}
@Article{Aalto:2007:MDO,
author = "Samuli Aalto and Urtzi Ayesta",
title = "Mean delay optimization for the {M/G/1} queue with
{Pareto} type service times",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "383--384",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254941",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Gittins index; M/G/1; mean delay; Pareto distribution;
scheduling",
}
@Article{Squillante:2007:F,
author = "Mark S. Squillante",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "2--2",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important role
in research studies to investigate fundamental issues
and tradeoffs at the core of performance problems in
the design and implementation of complex computer
systems, networks and applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gianini:2007:PNR,
author = "Gabriele Gianini and Ernesto Damiani",
title = "{Poisson}-noise removal in self-similarity studies
based on packet-counting: factorial-moment\slash
strip-integral approach",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "3--5",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330559",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we point out that some common methods for
estimating self-similarity parameters --- involving
packet counting for the estimate of statistical moments
--- are affected by distortion at the finest
resolutions and quantization errors and we illustrate
--- using also a small sample of the Bellcore data set
--- a technique for removing this undesirable effect,
based on factorial moments and strip integrals. Then we
extend the strip-integral approach to the approximation
of the square of the Haar wavelet coefficients, for the
estimate of the Hurst self-affinity exponent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marbukh:2007:FBS,
author = "Vladimir Marbukh",
title = "Fair bandwidth sharing under flow arrivals\slash
departures: effect of retransmissions on stability and
performance",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "6--8",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A flow-level Markov model for fair bandwidth sharing
with packet retransmissions and random flow
arrivals/departures is proposed. The model accounts for
retransmissions by assuming that file transfer rates
are determined by the end-to-end goodputs rather than
the corresponding throughputs as in the conventional
model. The model predicts network instability even
under light exogenous load. Despite this instability, a
desirable metastable network state with a finite number
of flows in progress may exist. The network can be
stabilized in a close neighborhood of the metastable
state with admission control at the cost of a small flow
rejection probability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; arriving/departing flows; fair
bandwidth sharing; performance; retransmissions;
stability",
}
@Article{Osogami:2007:AMT,
author = "Takayuki Osogami",
title = "Accuracy of measured throughputs and mean response
times",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "9--11",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330561",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of computer systems such as Web
systems is measured to guarantee quality of service
(QoS) or to compare different configurations of the
systems [8]. We consider the problem of whether we
should measure mean response time or throughput to
better guarantee QoS or to better compare different
configurations of a Web system. Specifically, is
measured mean response time or measured throughput more
accurate, when the Web system is measured for a fixed
period of time?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2007:EHM,
author = "Varun Gupta and Jim Dai and Mor Harchol-Balter and
Bert Zwart",
title = "The effect of higher moments of job size distribution
on the performance of an {\em {M/G/s}\/} queueing
system",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "12--14",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330562",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The {\em M/G/s/} queueing system is the oldest and
most classical example of multiserver systems. Such
multiserver systems are commonplace in a wide range of
applications, ranging from call centers to
manufacturing systems to computer systems, because they
are cost-effective and their serving capacity can be
easily scaled up or down.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hossfeld:2007:MOT,
author = "Tobias Ho{\ss}feld and Kenji Leibnitz and Marie-Ange
Remiche",
title = "Modeling of an online {TV} recording service",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "15--17",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330563",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, new services have emerged which utilize the
Internet as a delivery mechanism for multimedia
content. With the advent of broadband accesses, more
users are willing to download large volume content from
servers, such as video files of TV shows. While some
popular video services (e.g. YouTube.com) or some
broadcasting companies (e.g. ABC.com) use streaming
data with Flash technology, some media distributors
(e.g. iTunes) offer entire TV shows for download. In
this study, we investigate the performance of the
German site OnlineTVRecorder.com (OTR), which acts as
an online video cassette recorder (VCR) where users can
program their favorite shows over a web interface and
download the recorded files from a server or its
mirrors. These files are offered in different file
formats and can consist of several hundred megabytes up
to 1 GB or more depending on the length of the TV show
as well as the encoding format. OTR can, thus, be seen
as an example for a server-based content distribution
system with large data files.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2007:OTC,
author = "Peng Wang and Stephan Bohacek",
title = "An overview of tractable computation of optimal
scheduling and routing in mesh networks",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "18--20",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330564",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Capacity optimization by optimizing transmission
schedules of wireless networks has been an active area
of research for at least 20 years. The challenge is
that the space over which the optimization is performed
is exponential in the number of links in the network.
For example, in the simple SISO case where no power
control is used and only one bitrate is available, the
optimization must be performed over a space of size $
2^L $ where there are $L$ links in the network. Thus, a
brute force approach to this optimization is not
possible for even moderate size networks of more than a
few tens of links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2007:ESE,
author = "Florin Ciucu",
title = "Exponential supermartingales for evaluating end-to-end
backlog bounds",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "21--23",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A common problem arising in network performance
analysis with the stochastic network calculus is the
evaluation of ({\em min}, +) convolutions. This paper
presents a method to solve this problem by applying a
maximal inequality to a suitable constructed
supermartingale. For a network with D/M input,
end-to-end backlog bounds obtained with this method
improve existing results at low utilizations. For the
same network, it is shown that at utilizations smaller
than a certain threshold, fluid-flow models may lead to
inaccurate approximations of packetized models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2007:IPS,
author = "Varun Gupta and Karl Sigman and Mor Harchol-Balter and
Ward Whitt",
title = "Insensitivity for {PS} server farms with {JSQ}
routing",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "24--26",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330566",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Join-the-Shortest-Queue (JSQ) is a very old and
popular routing policy for server farms. Figure 1 shows
two examples of server farm architectures employing JSQ
routing. In both cases, each incoming job is
immediately dispatched, via a front-end router, to the
queue with the fewest number of jobs, designated as the
{\em shortest queue\/} (ties are broken at random). In
Figure 1(a), jobs at a queue are served in
First-Come-First-Served (FCFS) order. In Figure 1(b),
jobs within a queue are served according to
Processor-Sharing (PS), meaning that when there are $n$
jobs at a queue, they {\em share\/} the processing
capacity, each simultaneously receiving 1/nth of the
service. We refer to Figure 1(a) as a JSQ/FCFS server
farm and to Figure 1(b) as a JSQ/PS farm. If more
detail is needed, we use the notation: M/G/K/JSQ/PS,
denoting a Poisson arrival process, i.i.d. job sizes
from a general distribution, $K$ servers, JSQ routing;
and PS scheduling at queues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "insensitivity; JSQ; processor-sharing; shortest queue
routing; single-queue approximation",
}
@Article{Casale:2007:CMA,
author = "Giuliano Casale and Eddy Z. Zhang and Evgenia Smirni",
title = "Characterization of moments and autocorrelation in
{MAPs}",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "27--29",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markovian Arrival Processes (MAPs) [9] are a general
class of point processes which admits
hyper-exponential, Erlang, and Markov Modulated Poisson
Processes (MMPPs) as special cases. MAPs can be easily
integrated within queueing models. This makes MAPs
useful for evaluating the impact of non-Poisson
workloads in networking and for quantifying the
performance of multi-tiered e-commerce applications and
disk drives [8, 10].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Field:2007:AAN,
author = "Tony Field and Peter Harrison",
title = "Approximate analysis of a network of fluid queues",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "30--32",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fluid models have for some time been used to
approximate stochastic networks with discrete state.
These range from traditional `heavy traffic'
approximations to the recent advances in bio-chemical
system models. Here we use an approximate compositional
method to analyse a simple feedforward network of fluid
queues which comprises both probabilistic branching and
superposition. This extends our earlier work that
showed the approximation to yield excellent results for
a linear chain of fluid queues. The results are
compared with those from a simulation model of the same
system. The compositional approach is shown to yield
good approximations, deteriorating for nodes with high
load when there is correlation between their immediate
inputs. This correlation arises when a common set of
external sources feeds more than one queue, directly or
indirectly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reich:2007:TCU,
author = "Joshua Reich and Vishal Misra and Dan Rubenstein",
title = "The time-correlated update problem",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "33--35",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent advances in the fields of sensor networks and
mobile robotics have provided the means to place
monitoring/sensing equipment in an increasingly wide
variety of environments --- a significant proportion of
which can reasonably be expected to lack traditional
network connectivity characteristics [5] [8].
Challenged networks, operating under significant sets
of constraints in which disconnected paths and long
delays are normal events, have come to be known as
Delay/Disruption Tolerant Networks (DTN) [2]. Some
examples of environments in which DTN techniques may be
required include remote or vast domains such as
underground, underwater, outer-space, Arctic, and
mountainous environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kang:2007:PFS,
author = "W. N. Kang and F. P. Kelly and N. H. Lee and R. J.
Williams",
title = "Product form stationary distributions for diffusion
approximations to a flow-level model operating under a
proportional fair sharing policy",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "36--38",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a flow-level model of Internet congestion
control introduced by Massouli{\'e} and Roberts [2]. We
assume that bandwidth is shared amongst elastic
documents according to a weighted proportional fair
bandwidth sharing policy. With Poisson arrivals and
exponentially distributed document sizes, we focus on
the heavy traffic regime in which the average load
placed on each resource is approximately equal to its
capacity. In [1], under a mild local traffic condition,
we establish a diffusion approximation for the workload
process (and hence for the flow count process) in this
model. We first recall that result in this paper. We
then state results showing that when all of the weights
are equal (proportional fair sharing) the diffusion has
a product form invariant distribution with a strikingly
simple interpretation in terms of dual random
variables, one for each of the resources of the
network. This result can be extended to the case where
document sizes are distributed as finite mixtures of
exponentials, and to models that include multi-path
routing (these extensions are not described here, but
can be found in [1]).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2007:OCP,
author = "Yingdong Lu and Ana Radovanovi{\'c} and Mark S.
Squillante",
title = "Optimal capacity planning in stochastic loss
networks",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "39--41",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A large number of application areas involve resource
allocation problems in which resources of different
capabilities are used to provide service to various
classes of customers at their arrival instants,
otherwise the opportunity to serve the customer is
lost. Stochastic loss networks are often used to
capture the dynamics and uncertainty of this class of
resource allocation problems. A wide variety of
examples include applications in telephony and data
networks, distributed computing and data centers,
inventory control and manufacturing systems, and call
and contact centers. Another emerging application area
is workforce management where, e.g., an IT services
company offers a collection of service products, each
requiring a set of resources with certain capabilities.
The customer demands for such IT service products are
stochastic and the IT services company seeks to
determine its per-class resource capacity levels in
order to maximize its profits over the long run.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cherkasova:2007:CTC,
author = "Ludmila Cherkasova and Diwaker Gupta and Amin Vahdat",
title = "Comparison of the three {CPU} schedulers in {Xen}",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "42--51",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The primary motivation for enterprises to adopt
virtualization technologies is to create a more agile
and dynamic IT infrastructure --- with server
consolidation, high resource utilization, the ability
to quickly add and adjust capacity on demand --- while
lowering total cost of ownership and responding more
effectively to changing business conditions. However,
effective management of virtualized IT environments
introduces new and unique requirements, such as
dynamically resizing and migrating virtual machines
(VMs) in response to changing application demands. Such
capacity management methods should work in conjunction
with the underlying resource management mechanisms. In
general, resource multiplexing and scheduling among
virtual machines is poorly understood. CPU scheduling
for virtual machines, for instance, has largely been
borrowed from the process scheduling research in
operating systems. However, it is not clear whether a
straightforward port of process schedulers to VM
schedulers would perform just as well. We use the open
source Xen virtual machine monitor to perform a
comparative evaluation of three different CPU
schedulers for virtual machines. We analyze the impact
of the choice of scheduler and its parameters on
application performance, and discuss challenges in
estimating the application resource requirements in
virtualized environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marsan:2007:F,
author = "Marco Ajmone Marsan and Prashant Shenoy",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "2--3",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance 2007, the 26-th International Symposium on
Computer Performance, Modeling, Measurements, and
Evaluation was held in Cologne, Germany, on October
2--5, 2007. As in the past, in addition to the main
technical program, a poster session was organized to
present and discuss ongoing or recent research work in
an informal setting.\par
A total of 11 posters were selected for presentation
during the conference by the Performance 2007 Technical
Program Committee. This special issue of {\em
Performance Evaluation Review\/} consists of the
extended abstracts of these posters, which cover a wide
range of topics in the area of performance evaluation,
analytical modeling and simulation of computer systems
and communication networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cesana:2007:EPC,
author = "M. Cesana and L. Campelli and F. Borgonovo",
title = "Efficiency of physical carrier sensing in wireless
access networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "4--6",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose an analytical approach for evaluating the
impact of physical carrier sensing in simple wireless
access networks. We describe the system through a
time-continuous Markov Chain, and we gather from its
solution performance measures in terms of throughput
and collision probability. We derive qualitative
dimensioning criteria for the carrier sensing itself
under different network conditions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cano:2007:HDE,
author = "Juan-Carlos Cano and Jos{\'e}-Manuel Cano and Eva
Gonz{\'a}lez and Carlos Calafate and Pietro Manzoni",
title = "How does energy consumption impact performance in
{Bluetooth}?",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "7--9",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328694",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we investigate the power characteristics
of the Bluetooth technology when supporting low-power
modes. We provide accurate power consumption
measurements for different Bluetooth operating modes.
Such information could be used to drive technical
decisions on battery type and design of Bluetooth-based
end systems. Finally, we examine the trade-off between
power consumption and performance for a commercial
off-the-shelf Bluetooth device. We find that the use of
the {\em sniff\/} mode could be quite compatible with
the use of multi-slot data packets. However, when the
channel conditions require selecting single slot data
packets, the {\em sniff\/} mode greatly impacts
performance, and so the power/delay trade-off must be
taken into consideration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lukas:2007:IBL,
author = "Georg Lukas and Andr{\'e} Herms and Daniel
Mahrenholz",
title = "Interval based off-line clock synchronization for
wireless mesh networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "10--12",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328695",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wireless mesh networks suffer from various problems
like congestion or packet collisions. To identify and
overcome these problems an exact global view of the
communication is required. However, it is not possible
to observe the whole network from a single location.
Instead, a distributed monitoring is necessary, which
has to include clock synchronization. We present a new
interval-based algorithm for the off-line
synchronization of passively monitored network events.
It calculates the worst-case time interval for every
event on a global clock, while considering inaccuracies
caused by processing jitter and non-uniform clock
drifts. The experimental evaluation on a live mesh
network shows an accuracy of better than 130$ \mu s $
over a four-hop distance, which is below the minimum
transmission time of data packets. Thereby, our
algorithm creates a highly precise global view of the
network, which allows a detailed diagnosis of wireless
mesh networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chydzinski:2007:SFB,
author = "Andrzej Chydzinski",
title = "Solving finite-buffer queues with {Markovian}
arrivals",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "13--15",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this report we study queueing systems satisfying
the following conditions:\par
{\bullet} finite buffer (waiting room)\par
{\bullet} the left-skip-free queue size process at
departure epochs\par
{\bullet} arrival process with Markovian structure",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciardo:2007:ASM,
author = "Gianfranco Ciardo and Andrew S. Miner and Min Wan and
Andy Jinqing Yu",
title = "Approximating stationary measures of structured
continuous-time {Markov} models using matrix diagrams",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "16--18",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the stationary solution of large ergodic
continuous-time Markov chains (CTMCs) with a finite
state space $S$, i.e., the computation of $ \pi $ as
solution of $ \pi \cdot Q = 0$ subject to $ \sum_{i \in S}
\pi [i] = 1$, where $Q$ coincides with
transition rate matrix $R$ except in its diagonal
elements, $ Q[i, i] = - \sum_{j \in S} R[i, j]$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Silveira:2007:PPL,
author = "Fernando Silveira and Edmundo {de Souza e Silva}",
title = "Predicting packet loss statistics with hidden {Markov}
models",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "19--21",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328698",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of applications can benefit from estimating
future loss statistics. For instance, if the end-to-end
loss characteristics of a path can be well approximated
in advance, then a media streaming application could
adapt its transmission parameters in order to deliver
data with an acceptable quality to the user. In this
work, we present a framework for adaptive prediction
using hidden Markov models (HMMs). We propose a new
class of hidden Markov models whose parameter values
can be efficiently computed as compared to general
HMMs. We also develop methods for predicting two
measures of interest from HMMs, and perform experiments
over a set of packet traces to assess the goodness of
these predictions. Finally, we apply our prediction
framework to dynamically select a forward error
correction (FEC) scheme for media streaming. Using real
Internet packet traces we evaluate the performance of
our approach by emulating a VoIP tool. The PESQ
algorithm is applied to assess the perceptual speech
quality before and after the dynamic FEC selection. Our
results show that the prediction-based approach
achieves significant quality improvements with a small
increase in the average transmission rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menth:2007:NSM,
author = "Michael Menth and Andreas Binzenh{\"o}fer and Stefan
M{\"u}hleck",
title = "A note on source models for speech traffic",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "22--24",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328699",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Speech traffic is often used in simulations to
evaluate the performance of control mechanisms in
communication networks. Therefore, trustworthy models
are required that capture the fundamental statistical
properties of typical voice sources. The G.723.1 codec
produces on/off traffic streams with fixed size
packets. The iSAC codec strongly periodic packet
streams with variable packet sizes. We propose new
models for the traffic output of both codecs and show
that their queuing properties are in good accordance
with those of the original traffic traces, while existing
traffic models that are frequently used in the literature
lead to significant discrepancies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bracciale:2007:OOP,
author = "Lorenzo Bracciale and Francesca {Lo Piccolo} and Dario
Luzzi and Stefano Salsano",
title = "{OPSS}: an overlay peer-to-peer streaming simulator
for large-scale networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "25--27",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328700",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present OPSS, an Overlay Peer-to-peer
Streaming Simulator designed to simulate large scale
(i.e. in the order of 100K nodes) peer-to-peer
streaming systems. OPSS is able to simulate a fair
(i.e. `TCP-like') sharing of the uplink and downlink
bandwidth among different connections, and it
guarantees extensibility by allowing the implementation
of different peer-to-peer streaming algorithms as
separate modules. Source code of OPSS is available
under the GPL license.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Engels:2007:ETS,
author = "Kai Engels and Ralf Heidger and Reinhold Kroeger and
Morris Milekovic and Jan Schaefer and Markus Schmid and
Marcus Thoss",
title = "{eMIVA}: tool support for the instrumentation of
critical distributed applications",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "28--30",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328701",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, enterprise applications faced an ever
growing complexity of business processes as well as an
increase in the number of interacting hardware and
software components. The ability to efficiently manage
their IT infrastructure up to the application level is
therefore critical to a company's success and results
in rising importance of Service Level Management (SLM)
technologies [6, 10]. As a prerequisite for application
management, monitoring and instrumentation techniques
face growing interest. Depending on the criticality of
an application, monitoring can either be based on
statistical samples, or can require monitoring of each
request handled by the system, e.g. for validation or
verification purposes. While most enterprise
applications belong to the first category, air traffic
control scenarios are an example of the second
category. Here, even a statistically small number of
slow requests may result in dangerous situations or
fatal accidents.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dube:2007:CPQ,
author = "Parijat Dube and Corinne Touati and Laura Wynter",
title = "Capacity planning, quality of service and price wars",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "31--33",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328702",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We model the relationship between capacity, Quality of
Service (QoS) and offered prices of service providers
in a competitive e-services market. Capacity and QoS
are linked through simple queueing formulae while QoS
and price are coupled through distributions on customer
preferences. We study the sensitivity of market share
of providers to price, capacity and market size. We
revisit the notion of `price wars' that has been shown
to lead to zero profits for all providers and conclude
that our more general model does admit some form of
anomalous behavior, but one that need not lead to zero
profits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Begin:2007:HLA,
author = "Thomas Begin and Alexandre Brandwajn and Bruno Baynat
and Bernd E. Wolfinger and Serge Fdida",
title = "High-level approach to modeling of observed system
behavior",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "34--36",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328703",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current computer systems and communication networks
tend to be highly complex, and they typically hide
their internal structure from their users. Thus, for
selected aspects of capacity planning, overload control
and related applications, it is useful to have a method
allowing one to find good and relatively simple
approximations for the observed system behavior. This
paper investigates one such approach where we attempt
to represent the latter by adequately selecting the
parameters of a set of queueing models. We identify a
limited number of queueing models that we use as
`Building Blocks' (BBs) in our procedure. The selected
BBs allow us to accurately approximate the measured
behavior of a range of different systems. We propose an
approach for selecting and combining suitable BBs, as
well as for their calibration. Finally, we validate our
methodology and discuss the potential and the
limitations of the proposed approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Misra:2007:F,
author = "Vishal Misra",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "37--37",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328705",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sigmetrics 2007 was held from June 12--16th in San
Diego as part of the Federated Computing Research
Conference. This year a Student Workshop was introduced
in addition to the main technical program, and it was
held on June 12th and 13th. Submissions were solicited
in the form of extended abstracts and reviewed by a
program committee. This special issue of {\em
Performance Evaluation Review\/} presents the 16
abstracts finally chosen for the program. All the
authors of accepted abstracts were given a travel grant
by Sigmetrics to come and attend the whole conference.
The program started on the afternoon of June 12th with
a highly educative, informative and entertaining talk
by Simon Peyton Jones of Microsoft Research Cambridge
on `How to write a great paper'. The next day the
student authors presented their research in a poster
session that was well attended by the regular
conference attendees. Special mention must go to the
outgoing Sigmetrics Chair, Albert Greenberg, who spent
a considerable amount of time with each and every
student presenter and gave valuable feedback to them.
After the poster session in the afternoon we had a
panel on `Performance Evaluation: An Industry
Perspective'. The participants were Albert Greenberg
(Microsoft Research), Arif Merchant (HP Labs), Muthu
Muthukrishnan (Google), Shubhabrata Sen (AT\&T
Research), and Cathy Xia (IBM). The panel was
originally scheduled to run for 90 minutes, but it ran
almost twice the scheduled time with neither the
audience nor the panelists in any mood to cut short the
lively discussion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2007:LWA,
author = "Wenbin Zhu and Patrick G. Bridges and Arthur B.
Maccabe",
title = "Light-weight application monitoring and tuning with
embedded gossip",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "38--39",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328706",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For large-scale, long-running applications, monitoring
can be expensive. While traditional trace-based
monitoring provides detailed information about an
application, it is expensive to record and gather the
traced performance data. Processing the voluminous
traced data is so demanding that information about the
monitored application is only available post-mortem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kortebi:2007:IAS,
author = "Riadh M. Kortebi and Yvon Gourhant and Nazim
Agoulmine",
title = "Interference-aware {SINR}-based routing: algorithms
and evaluation",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "40--42",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328707",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of mitigating interference and
improving network capacity in a single-radio,
single-channel wireless multi-hop network. An ongoing
aim of our research is to design a routing metric which
is cognizant of interference. Modelling routing with a
complete set of interference constraints is an NP-hard
problem. One major issue to be addressed is to infer
the degree of interference among different flows. To
address this issue, and based on the measurement of the
received signal strengths, we propose a 2-Hop
interference Estimation AlgoRithm (2-HEAR). With the
use of the received signal level, a node can calculate
the signal to interference plus noise ratio (SINR) of
the links to its neighbors. The calculated SINR is used
to infer the packet error rate (PER) between a node and
each of its $1^{st}$-tier interfering nodes.
Then the residual capacity at a given node is estimated
using the calculated PERs. A cost function is used with
the aim of load-balancing between the different flows
within the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bui:2007:ORA,
author = "Loc Bui and R. Srikant and Alexander Stolyar",
title = "Optimal resource allocation for multicast flows in
multihop wireless networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "43--43",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328708",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, the network utilization maximization theory
has been extended to include resource allocation for
multi-hop wireless networks. However, the existing
theory is applicable only to unicast flows. Other than
developing appropriate notations, it is somewhat
straightforward to extend the theory to multicast flows
if one assumes that data is delivered to all the
receivers in a multicast group at the same rate. Such a
form of multicast is called single-rate multicast. On
the other hand, there are many video applications which
allow layered-transmission so that different receivers
can subscribe to different numbers of layers and
receive different qualities of the same video,
depending upon the congestion level in their respective
neighborhoods. Moreover, in wireless networks, due to
varying signal strengths at different receivers, it may
be neither desirable nor feasible to deliver data at the
same rate to all the receivers in a multicast group.
Thus, it is important to extend the optimization-based
theory to handle multi-rate multicast flows, i.e.,
multicast flows where different receivers are allowed
to receive at different rates. Such an extension is not
as straightforward as in the case of single-rate
multicast, and is the main subject of this paper. We
note that the multi-rate multicast problem has been
considered in the context of wired networks. However,
those approaches cannot be directly applied to wireless
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mi:2007:PIA,
author = "Ningfang Mi",
title = "Performance impacts of autocorrelated flows in
multi-tiered systems",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "44--45",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328709",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We focus on the general problem of capacity planning
and performance prediction of multi-tiered systems.
Workload characterization studies of such systems
usually examine the stochastic characteristics of
arrivals to the system and wait/service times at
various tiers aiming at bottleneck identification,
diagnosing the conditions under which bottlenecks are
triggered, and assisting the development of resource
management policies to improve performance or provide
service level provisioning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kaushik:2007:RCA,
author = "Neena Kaushik and Silvia Figueira and Stephen A.
Chiappari",
title = "Resource co-allocation using advance reservations with
flexible time-windows",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "46--48",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328710",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Co-allocations require the availability of more than
one resource for utilization in a time interval. We
show that co-allocations increase the blocking
probability and analyze the use of flexible windows to
lower blocking probability in spite of
co-allocations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "advance reservations; blocking probability;
co-allocation; flexible time-windows",
}
@Article{Verloop:2007:ERA,
author = "Maaike Verloop and Rudesindo N{\'u}{\~n}ez-Queija",
title = "Efficient resource allocation in bandwidth-sharing
networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "49--50",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328711",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Document transfer in the Internet is regulated by
distributed packet-based congestion control mechanisms,
usually relying on TCP. By dividing a document into
packets, parts of one file reside at different nodes
along the transmission path. The `instantaneous
transfer rate' of the entire document can be thought of
as being equal to the minimum transfer rate along the
entire path. Bandwidth-sharing networks as considered
by Massouli{\'e} & Roberts [2] provide a natural
modeling framework for the dynamic flow-level
interaction among document transfers. The class of $
\alpha $-fair policies for such networks, as introduced
by Mo \& Walrand [3], captures a wide range of
distributed allocation mechanisms such as TCP, the
proportional fair allocation and the max-min fair
allocation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Miretskiy:2007:TQS,
author = "D. I. Miretskiy and W. R. W. Scheinhardt and M. R. H.
Mandjes",
title = "Tandem queue with server slow-down",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "51--52",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328712",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study how rare events happen in the standard
two-node tandem Jackson queue and in a generalization,
the so-called slow-down network, see [2]. In the latter
model the service rate of the first server depends on
the number of jobs in the second queue: the first
server slows down if the number of jobs in the second
queue is above some threshold and returns to its normal
speed when the number of jobs in the second queue is
below the threshold. This property protects the second
queue, which has a finite capacity $B$, from overflow.
In fact this type of overflow is precisely the rare
event we are interested in. More precisely, consider
the probability of overflow in the second queue before
the entire system becomes empty. The starting position
of the two queues may be any state in which at least
one job is present.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Volkovich:2007:SMW,
author = "Y. Volkovich and D. Donato and N. Litvak",
title = "Stochastic models for {Web} ranking",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "53--53",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328713",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Web search engines need to deal with hundreds and
thousands of pages which are relevant to a user's
query. Listing them in the right order is an important
and non-trivial task. Thus Google introduced {\em
PageRank\/} [1] as a popularity measure for Web pages.
Besides its primary application in search engines,
PageRank also became a major method for evaluating
the importance of nodes in different informational networks
and database systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hylick:2007:HDP,
author = "Anthony Hylick and Andrew Rice and Brian Jones and
Ripduman Sohan",
title = "Hard drive power consumption uncovered",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "54--55",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328714",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Power consumption is a problem affecting all forms of
computing, from server farms to mobile devices. Hard
disks account for a significant percentage of a
machine's power consumption due to the mechanical
nature of drive operation and increasingly
sophisticated electronics. Due to this fact, much
research has been conducted with the aim of reducing the
power consumption of hard drives; examples include
adaptive spin-down policies [1] and probabilistic
management approaches [4]. However, this work has been
done without fine-grained measurements of drive power
consumption to accurately characterize trends; a
shortcoming observed by other authors [3].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gulati:2007:TFE,
author = "Ajay Gulati and Peter Varman and Arif Merchant and
Mustafa Uysal",
title = "Towards fairness and efficiency in storage systems",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "56--58",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328715",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fairness and overall I/O efficiency are two opposing
forces when it comes to sharing I/O among different
applications. Although providing QoS guarantees for
applications sharing a storage server is desirable
under many scenarios, existing work has not been able
to make a convincing case for using fairness mechanisms
for disk scheduling, mainly due to their impact on
overall throughput. In this work, we plan to
investigate two major issues: (1) study the trade-off
between fairness and efficiency, and develop mechanisms
to improve the I/O efficiency of fair schedulers (2)
provide performance guarantees to applications in terms
of higher-level application metrics (such as
transactions/sec), by changing the parameters in a
fairness algorithm that affect the allocations at the
block level.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Heimlicher:2007:EEV,
author = "Simon Heimlicher and Pavan Nuggehalli and Martin May",
title = "End-to-end vs. hop-by-hop transport",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "59--60",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328716",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The transport layer has been considered an end-to-end
issue since the early days of the Internet in the 1980s
[1], when the TCP/IP protocol suite was designed to
connect networks of dedicated routers over wired links.
However, over the last quarter of a century, network
technology as well as the understanding of the Internet
has changed, and today's wireless networks differ from
the Internet in many aspects. Since wireless links are
unreliable, it is often impossible to sustain an
end-to-end connection to transmit data in wireless
network scenarios. Even if an end-to-end path exists in
the network topology for some fraction of the
communication, it is likely to break due to signal
propagation impairments, interference, or node
mobility. Under these circumstances, the operation of
an end-to-end transport protocol such as TCP may be
severely affected.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balakrichenan:2007:SPT,
author = "Sandoche Balakrichenan and Thomas Bugnazet and Monique
Becker",
title = "A simulation platform: for testing and optimization of
{ENUM} architecture",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "61--63",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328717",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Electronic NUmber Mapping (ENUM)[1] System, a suite of
protocols developed by IETF is one of the simplest
approach which permits communicating from the telephony
to the Internet Protocol (IP) world and vice versa in a
seamless manner. Implementing ENUM is simple because it
uses the existing Domain Name System (DNS) to store and
serve the information linking PSTN telephone numbers to
network addresses and services (email address, SIP
phone number, etc.). An explanation of how a telephone
number is converted to a Fully Qualified Domain Name
(FQDN) is shown in Fig. 1.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "DNS; ENUM; HMM; model",
}
@Article{Mohror:2007:SEB,
author = "Kathryn Mohror and Karen L. Karavanic",
title = "Scalable event-based performance measurement in
high-end environments",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "64--65",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328718",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We are developing a novel performance measurement
technique to address the scalability challenges of
event-based tracing on high-end computing systems. We
collect the information needed to diagnose performance
problems that traditionally require traces, but at a
greatly reduced data volume. Performance analysts
working on today's high-end systems require event-based
measurements to correctly identify the root cause of a
number of the complex performance problems that arise
on these highly parallel systems. These
high-end architectures contain tens to hundreds of
thousands of processors, pushing application
scalability challenges to new heights. Unfortunately,
the collection of event-based data presents scalability
challenges itself: the added measurement instructions
and tool activities perturb the target application; and
the large volume of collected data increases tool
overhead, and results in data files that are difficult
to store and analyze.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vicari:2007:DRP,
author = "Claudio Vicari and Chiara Petrioli and Francesco {Lo
Presti}",
title = "Dynamic replica placement and traffic redirection in
content delivery networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "66--68",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328719",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper jointly addresses dynamic replica placement
and traffic redirection to the best replica in Content
Delivery Networks (CDNs). Our solution is fully
distributed and localized and trades off the costs paid
by the CDN provider (e.g., the number of allocated
replicas, frequency of replica additions and removals)
with the quality of the content access service as
perceived by the final user. Our simulation
experiments show that the proposed scheme results in
a number of replicas which is only slightly higher than
the minimum required to be able to satisfy all user
requests, thus keeping the replicas at a good level of
utilization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "content access; content delivery networks; dynamic
replica placement; user requests redirection",
}
@Article{Papadopoulos:2007:PPI,
author = "Fragkiskos Papadopoulos and Konstantinos Psounis",
title = "Predicting the performance of {Internet}-like networks
using scaled-down replicas",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "69--71",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328720",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Internet is a large, heterogeneous system
operating at very high speeds and consisting of a large
number of users. Researchers use a suite of tools and
techniques in order to understand the performance of
complex networks like the Internet: measurements,
simulations, and deployments on small to medium-scale
testbeds. This work considers a novel addition to this
suite: a class of methods to {\em scale down\/} the
{\em topology\/} of the Internet that enables
researchers to create and observe a smaller replica,
and extrapolate its performance to the expected
performance of the larger Internet.\par
The key insight that we leverage is that only the
congested links along the path of each flow introduce
sizable queueing delays and dependencies among flows.
Hence, one might hope that the network properties can
be captured by a topology that consists of the
congested links only. We have verified this in [11, 12]
using extensive simulations with TCP traffic and
theoretical analysis. Further, we have also shown that
simulating a scaled topology can be up to two orders of
magnitude faster than simulating the original topology.
However, a main assumption of our approach was that
uncongested links are known in advance.\par
We are currently working on establishing rules that can
be used to efficiently identify uncongested links in
large and complex networks like the Internet, when
these are not known, and which can be ignored when
building scaled-down network replicas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shamsi:2007:PPS,
author = "Jawwad Shamsi and Monica Brockmeyer",
title = "{PSON}: predictable service overlay networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "72--74",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328721",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Predictable Service Overlay Networks (PSON) improve
the predictability of Internet communication by
providing an estimate of the upper bound on message
latency for each path of the overlay. The upper bound
serves as an assurance of synchrony and enables
applications to order events or make inferences based
on non-receipt of a message. For improved performance,
PSON also employs overlay routing and overlay
configuration. Messages are routed either through the
direct overlay path or via a one-hop overlay path such
that the selected path is stable and promotes
synchrony, while the overlay configuration mechanisms
are utilized in order to select nodes that promote
predictable communication. The expected impact of PSON
is that by utilizing intelligent techniques such as
upper bound estimation, routing and configuration, it
can harness the unexpected and unreliable Internet
substrate to provide a predictable communication
overlay for applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet synchrony; overlay networks; predictable
communication",
}
@Article{Gilmore:2008:F,
author = "Stephen Gilmore and Jane Hillston",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "2--2",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364649",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The four papers in this special issue apply tools and
techniques from computer performance evaluation in the
very different domain of modelling biological systems.
This might seem to be a very odd thing to do but the
practice of analysing biological systems in this way is
becoming increasing common. As data about the internal
components of biological systems is becoming more
readily available, biologists are increasingly asking
questions about {\em how\/} systems function. In
addition to conducting laboratory experiments, they are
supported in this exploration by {\em in silico\/}
experimentation based on models. The view taken of the
biological processes focusses on the stimuli and
responses, a view akin to that taken of engineered
systems in systems engineering. Thus this new endeavour
in biology is known as {\em Systems
Biology}.\par
Performance analysts have a long tradition of modelling
systems in order to understand and predict their
function. Their focus is particularly on the dynamic
aspects of the system, the use of, and contention for,
resources, and the impact of uncertainty or randomness.
These issues are important in the biological setting
also, and so it is perhaps inevitable that we see some
people and techniques from performance modelling being
applied in systems biology. In particular some of the
high-level modelling formalisms which have supported
Markovian performance modelling in the last few decades
(stochastic Petri nets, stochastic process algebras,
etc.) are being applied in the biological domain.
Furthermore analysis techniques, such as Markovian
analysis, Monte Carlo simulation and probabilistic
model checking have also been adopted.\par
In this volume we have sought to give a snapshot of a
variety of work which is going on at this interface
between systems biology and more traditional
quantitative analysis techniques. It is by no means an
exhaustive account of this exciting area, but rather a
taster which will hopefully whet your appetite to find
out more.\par
To open the volume, the editors provide a survey paper
describing the motivations and goals of the systems
biology endeavour, summarising the existing modelling
techniques and outlining some instances of cross-over
between performance modelling and systems biology. This
includes an account of the use of ordinary differential
equations (ODE) and stochastic simulation to analyse
biological systems, and the adoption of high-level
modelling formalisms such as Petri nets and process
algebras to drive these ODE models and
simulations.\par
In their paper Kwiatkowska, Norman and Parker show the
application of logic and probabilistic model checking
to the analysis of biological signalling pathways. They
use the PRISM probabilistic model-checker to check
formulae of the CSL logic against CTMC-based models of
the MAPK cascade, a sequence of biochemical reactions
which sends a message within a cell. The paper provides
an introduction to the CSL logic as well as the
reactive modules language implemented by the PRISM
model checker. Performance measures of interest are
described using reward structures and the analysis
achieved by PRISM is able to show how the percentage of
activated MAPK, a key component of the pathway, and the
number of MAPK-MAPKK reactions, vary as a function of
time, for different values of the initial number of
MAPKs.\par
The paper by Jeschke, Ewald, Park, Fujimoto and
Uhrmacher addresses the drive for increased physical
accuracy in simulation models which represent the
spatial aspects of cell biology. Standard approaches to
stochastic simulation of cellular systems assume that
the cell is a homogeneous soup of biochemical
components. The truth is far removed from this, as the
cell has a lot of internal structure which can have a
profound effect on the dynamics of reactions. Setting
aside the assumption that the reacting chemical species
are well-stirred, spatial approaches divide the volume
into sub-volumes and apply a structured method which
identifies the next reaction to occur in each
subvolume. The cost of such an increase in accuracy in
the simulation model is a much increased running time
so the authors use a parallel and distributed approach
to improve performance.\par
To close this special issue we have a paper by Dematt{\'e},
Priami and Romanel which uses the BlenX language and
the Beta Workbench software to analyse the MAPK pathway
considered also by Kwiatkowska, Norman and Parker. The
BlenX language, and the Beta-binders process calculus
which was its inspiration, are examples of a new
generation of languages which have been designed
specifically for the biological domain, as an
alternative to using existing languages designed for
modelling computer systems. The paper shows how a
well-designed platform for modelling and simulation can
lift the user's experience and make their use of
process calculi more valuable, delivering insights
which would not have been seen otherwise.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gilmore:2008:PEC,
author = "Stephen Gilmore and Jane Hillston",
title = "Performance evaluation comes to life: quantitative
methods applied to biological systems",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "3--13",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364650",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an introduction to the use of
quantitative methods in modelling and analysis of
biological systems. We begin with a survey of the
methods presently in widespread use in computational
biology. We then continue to consider how the modelling
techniques and tools which have been used successfully
in performance evaluation studies of hardware and
software systems are now being applied to model
functions and processes in living systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "biochemical signalling pathways; stochastic process
algebra; systems biology",
}
@Article{Kwiatkowska:2008:UPM,
author = "Marta Kwiatkowska and Gethin Norman and David Parker",
title = "Using probabilistic model checking in systems
biology",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "14--21",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364651",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Probabilistic model checking is a formal verification
framework for systems which exhibit stochastic
behaviour. It has been successfully applied to a wide
range of domains, including security and communication
protocols, distributed algorithms and power management.
In this paper we demonstrate its applicability to the
analysis of biological pathways and show how it can
yield a better understanding of the dynamics of these
systems. Through a case study of the MAP
(Mitogen-Activated Protein) Kinase cascade, we explain
how biological pathways can be modelled in the
probabilistic model checker PRISM and how this enables
the analysis of a rich selection of quantitative
properties.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jeschke:2008:PDD,
author = "Matthias Jeschke and Roland Ewald and Alfred Park and
Richard Fujimoto and Adelinde M. Uhrmacher",
title = "A parallel and distributed discrete event approach for
spatial cell-biological simulations",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "22--31",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364652",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As data and knowledge about cell-biological systems
increases, so does the need for simulation tools to
support hypothesis-driven wet-lab experimentation.
Discrete event simulation has received a lot of
attention lately; however, its application is often
hampered by its lack of performance. One solution is
parallel, distributed approaches; however, their
application is limited by the amount of parallelism
available in the model. Recent studies have shown that
spatial aspects are crucial for cell biological
dynamics and they are also a promising candidate to
exploit parallelism. Promises and specific requirements
imposed by a spatial simulation of cell biological
systems will be illuminated by a parallel and
distributed variant of the Next-Subvolume Method (NSM),
which augments the Stochastic Simulation Algorithm
(SSA) with spatial features, and its realization in a
grid-inspired simulation system called Aurora.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dematte:2008:MSB,
author = "Lorenzo Dematt{\'e} and Corrado Priami and Alessandro
Romanel",
title = "Modelling and simulation of biological processes in
{BlenX}",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "32--39",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364653",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a scalable framework built upon the BlenX
language and inspired by the Beta-binders process
calculus to model, simulate and analyse biological
systems. We show the features of the Beta Workbench
framework on a running example based on the
mitogen-activated kinase pathway. We also discuss an
incremental modelling process that allows us to scale
up from pathway to network modelling and analysis. We
finally provide a comparison with related approaches
and some hints for future extensions of the
framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computational biology; modelling and simulation;
process calculi; systems biology",
}
@Article{Sommers:2008:SPR,
author = "Joel Sommers and Paul Barford and Albert Greenberg and
Walter Willinger",
title = "An {SLA} perspective on the router buffer sizing
problem",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "40--51",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364645",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we discuss recent work on buffer sizing
in the context of an ISP's need to offer and guarantee
competitive Service Level Agreements (SLAs) to its
customers. Since SLAs specify the performance that an
ISP guarantees to its customers, they provide critical
context for many configuration and provisioning
decisions and have specific relevance to buffer sizing.
We use a controlled laboratory environment to explore
the tradeoffs between buffer size and a set of
performance metrics over a range of traffic mixtures
for three different router designs. Our empirical study
reveals performance profiles that are surprisingly
robust to differences in router architecture and
traffic mix and suggests a design space within which
buffer sizing decisions can be made in practice. We
then present a preliminary approach for making buffer
sizing decisions within this framework that relates
directly to performance and provisioning requirements
in SLAs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Korzun:2008:DMR,
author = "Dmitry Korzun and Andrei Gurtov",
title = "A {Diophantine} model of routes in structured {P2P}
overlays",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "52--61",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364646",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An important problem in any structured Peer-to-Peer
(P2P) overlay is what routes are available between
peers. Understanding the structure of routes helps to
solve challenging problems related to routing
performance, security, and scalability. In this paper,
we propose a theoretical approach for describing
routes. It is based on a recent result in linear
Diophantine analysis and introduces a novel Diophantine
model of P2P routes. Such a route aggregates several
P2P paths that packets follow. A commutative
context-free grammar describes the forwarding behavior
of P2P nodes. Derivations in the grammar correspond to
P2P routes. Initial and final strings of a derivation
define packet sources and destinations, respectively.
Based on that we construct a linear Diophantine
equation system, where any solution counts forwarding
actions in a route representing certain integral
properties. Therefore, P2P paths and their composition
into routes are described by a linear Diophantine
system; its solutions (a basis) define the structure of
P2P paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sibai:2008:EPS,
author = "Fadi N. Sibai",
title = "Evaluating the performance of single and multiple core
processors with {PCMARK{\reg}05} and benchmark
analysis",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "62--71",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364647",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "PCMark{\reg}05 [4, 8] is a highly popular synthetic
benchmark for evaluating the performance of personal
computers (PCs) with millions of downloads via the
Internet. Based on open source and commercial
applications, it measures the execution time of highly
representative code extracts of these applications and
reports scores reflecting the overall system
performance, the CPU performance, the memory subsystem
performance, the graphics subsystem performance, and
the disk subsystem performance. In this article, we
focus on the PCMark{\reg}05 CPU test suite which is
composed of 8 tests to measure the performance and
scalability of various Intel single- and dual-core
processors. Six of these tests run a single application
each. One test runs 2 multitasked applications in
parallel and another test runs 4 multitasked
applications simultaneously. We present the results of
executing this benchmark's CPU test suite on high-end
Intel-based PC platforms with top-of-the-line
single-processor and dual-core processors, present the results
of our profiling and hotspot analysis, shed some light
on this test suite's prominent microarchitecture events
and its active threads' distributions, and characterize
this suite's workload. These results help in
understanding the performance characteristics of this
popular benchmark and in guiding future processor
design enhancements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "performance benchmark; single and dual core
processors; workload characterization",
}
@Article{Bordenave:2008:PRM,
author = "Charles Bordenave and David McDonald and Alexandre
Proutiere",
title = "Performance of random medium access control, an
asymptotic approach",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "1--12",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375459",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Random Medium-Access-Control (MAC) algorithms have
played an increasingly important role in the
development of wired and wireless Local Area Networks
(LANs) and yet the performance of even the simplest of
these algorithms, such as slotted-Aloha, is still not
clearly understood. In this paper we provide a general
and accurate method to analyze networks where
interfering users share a resource using random MAC
algorithms. We show that this method is asymptotically
exact when the number of users grows large, and explain
why it also provides extremely accurate performance
estimates even for small systems. We apply this
analysis to solve two open problems: (a) We address the
stability region of non-adaptive Aloha-like systems.
Specifically, we consider a fixed number of buffered
users receiving packets from independent exogenous
processes and accessing the resource using Aloha-like
algorithms. We provide an explicit expression to
approximate the stability region of this system, and
prove its accuracy. (b) We outline how to apply the
analysis to predict the performance of adaptive MAC
algorithms, such as the exponential back-off algorithm,
in a system where saturated users interact through
interference. In general, our analysis may be used to
quantify how far from optimality the simple MAC
algorithms used in LANs today are, and to determine if
more complicated (e.g. queue-based) algorithms proposed
in the literature could provide significant improvement
in performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "aloha/CSMA; exponential back-off; stability",
}
@Article{Casale:2008:BAC,
author = "Giuliano Casale and Ningfang Mi and Evgenia Smirni",
title = "Bound analysis of closed queueing networks with
workload burstiness",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "13--24",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375460",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Burstiness and temporal dependence in service
processes are often found in multi-tier architectures
and storage devices and must be captured accurately in
capacity planning models as these features are
responsible for significant performance degradations.
However, existing models and approximations for
networks of first-come first-served (FCFS) queues with
general independent (GI) service are unable to predict
performance of systems with temporal dependence in
workloads.\par
To overcome this difficulty, we define and study a
class of closed queueing networks where service times
are represented by Markovian Arrival Processes (MAPs),
a class of point processes that can model general
distributions, but also temporal dependent features
such as burstiness in service times. We call these
models MAP queueing networks. We introduce provable
upper and lower bounds for arbitrary performance
indexes (e.g., throughput, response time, utilization)
that we call Linear Reduction (LR) bounds. Numerical
experiments indicate that LR bounds achieve a mean
accuracy error of 2 percent.\par
The result promotes LR bounds as a versatile and
reliable bounding methodology of the performance of
modern computer systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bound analysis; burstiness; closed systems; Markovian
arrival processes; nonrenewal service; queueing
networks; temporal dependence",
}
@Article{Wierman:2008:SDI,
author = "Adam Wierman and Misja Nuyens",
title = "Scheduling despite inexact job-size information",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "25--36",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375461",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by the optimality of Shortest Remaining
Processing Time (SRPT) for mean response time, in
recent years many computer systems have used the
heuristic of `favoring small jobs' in order to
dramatically reduce user response times. However,
rarely do computer systems have knowledge of exact
remaining sizes. In this paper, we introduce the class
of $ \epsilon $-SMART policies, which formalizes the
heuristic of `favoring small jobs' in a way that
includes a wide range of policies that schedule using
inexact job-size information. Examples of $ \epsilon
$-SMART policies include (i) policies that use exact
size information, e.g., SRPT and PSJF, (ii) policies
that use job-size estimates, and (iii) policies that
use a finite number of size-based priority
levels.\par
For many $ \epsilon $-SMART policies, e.g., SRPT with
inexact job-size information, there are no analytic
results available in the literature. In this work, we
prove four main results: we derive upper and lower
bounds on the mean response time, the mean slowdown,
the response-time tail, and the conditional response
time of $ \epsilon $-SMART policies. In each case, the
results explicitly characterize the tradeoff between
the accuracy of the job-size information used to
prioritize and the performance of the resulting policy.
Thus, the results provide designers insight into how
accurate job-size information must be in order to
achieve desired performance guarantees.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "job size estimates; M/G/1; queueing; response time;
scheduling; shortest remaining processing time; SMART;
SRPT",
}
@Article{Lelarge:2008:NED,
author = "Marc Lelarge and Jean Bolot",
title = "Network externalities and the deployment of security
features and protocols in the {Internet}",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "37--48",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375463",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Getting new security features and protocols to be
widely adopted and deployed in the Internet has been a
continuing challenge. There are several reasons for
this, in particular economic reasons arising from the
presence of network externalities. Indeed, like the
Internet itself, the technologies to secure it exhibit
network effects: their value to individual users
changes as other users decide to adopt them or not. In
particular, the benefits felt by early adopters of
security solutions might fall significantly below the
cost of adoption, making it difficult for those
solutions to gain traction and get deployed at a
large scale.\par
Our goal in this paper is to model and quantify the
impact of such externalities on the adoptability and
deployment of security features and protocols in the
Internet. We study a network of interconnected agents,
which are subject to epidemic risks such as those
caused by propagating viruses and worms, and which can
decide whether or not to invest some amount to deploy
security solutions. Agents experience negative
externalities from other agents, as the risks faced by
an agent depend not only on the choices of that agent
(whether or not to invest in self-protection), but also
on those of the other agents. Expectations about
choices made by other agents then influence investments
in self-protection, resulting in a possibly suboptimal
outcome overall.\par
We present and solve an analytical model where the
agents are connected according to a variety of network
topologies. Borrowing ideas and techniques used in
statistical physics, we derive analytic solutions for
sparse random graphs, for which we obtain asymptotic
results. We show that we can explicitly identify the
impact of network externalities on the adoptability and
deployment of security features. In other words, we
identify both the economic and network properties that
determine the adoption of security technologies.
Therefore, we expect our results to provide useful
guidance for the design of new economic mechanisms and
for the development of network protocols likely to be
deployed at a large scale.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cascading; economics; epidemics; game theory; price of
anarchy; security",
}
@Article{Brosh:2008:DFT,
author = "Eli Brosh and Salman Abdul Baset and Dan Rubenstein
and Henning Schulzrinne",
title = "The delay-friendliness of {TCP}",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "49--60",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375464",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TCP has traditionally been considered unfriendly for
real-time applications. Nonetheless, popular
applications such as Skype use TCP since UDP packets
cannot pass through many NATs and firewalls. Motivated
by this observation, we study the delay performance of
TCP for real-time media flows. We develop an analytical
performance model for the delay of TCP. We use
extensive experiments to validate the model and to
evaluate the impact of various TCP mechanisms on its
delay performance. Based on our results, we derive the
working region for VoIP and live video streaming
applications and provide guidelines for delay-friendly
TCP settings. Our research indicates that simple
application-level schemes, such as packet splitting and
parallel connections, can reduce the delay of real-time
TCP flows by as much as 30\% and 90\%, respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "live video streaming; performance modeling; TDP
congestion control; VoIP",
}
@Article{Kim:2008:SVR,
author = "Changhoon Kim and Alexandre Gerber and Carsten Lund
and Dan Pei and Subhabrata Sen",
title = "Scalable {VPN} routing via relaying",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "61--72",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375465",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Enterprise customers are increasingly adopting MPLS
(Multiprotocol Label Switching) VPN (Virtual Private
Network) service that offers direct any-to-any
reachability among the customer sites via a provider
network. Unfortunately this direct reachability model
makes the service provider's routing tables grow very
large as the number of VPNs and the number of routes
per customer increase. As a result, router memory in
the provider's network has become a key bottleneck in
provisioning new customers. This paper proposes
Relaying, a scalable VPN routing architecture that the
provider can implement simply by modifying the
configuration of routers in the provider network,
without requiring changes to the router hardware and
software. Relaying substantially reduces the memory
footprint of VPNs by choosing a small number of hub
routers in each VPN that maintain full reachability
information, and by allowing non-hub routers to reach
other routers through a hub. Deploying Relaying in
practice, however, poses a challenging optimization
problem that involves minimizing router memory usage by
having as few hubs as possible, while limiting the
additional latency due to indirect delivery via a hub.
We first investigate the fundamental tension between
the two objectives and then develop algorithms to solve
the optimization problem by leveraging some unique
properties of VPNs, such as sparsity of traffic
matrices and spatial locality of customer sites.
Extensive evaluations using real traffic matrices,
routing configurations, and VPN topologies demonstrate
that Relaying is very promising and can reduce
routing-table usage by up to 90\%, while increasing the
additional distances traversed by traffic by only a few
hundred miles, and the backbone bandwidth usage by less
than 10\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "measurement; optimization; routing; VPN",
}
@Article{Tschopp:2008:HRD,
author = "Dominique Tschopp and Suhas Diggavi and Matthias
Grossglauser",
title = "Hierarchical routing over dynamic wireless networks",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "73--84",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375467",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In dynamic networks the topology evolves and routes
are maintained by frequent updates, consuming
throughput available for data transmission. We ask
whether there exist low-overhead schemes for these
networks that produce routes that are within a small
constant factor (stretch) of the optimal route-length.
This is studied by using the underlying geometric
properties of the connectivity graph in wireless
networks. For a class of wireless network models that
fulfill some mild conditions on the connectivity and
on mobility over the time of interest, we can design
a distributed routing algorithm that maintains the
routes over a changing topology. This scheme needs only
node identities and integrates location service along
with routing, therefore accounting for the complete
overhead. We analyze the worst-case (conservative)
overhead and route-quality (stretch) performance of
this algorithm for the aforementioned class of models.
Our algorithm allows constant stretch routing with a
network wide control traffic overhead of $ O(n \log^2
n) $ bits per mobility time step (time-scale of
topology change) translating to $ O(\log^2 n) $
overhead per node (with high probability for wireless
networks with such a mobility model). We can reduce the
maximum overhead per node by using a load-balancing
technique at the cost of a slightly higher average
overhead. Numerics show that these bounds are quite
conservative.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "competitive analysis; distributed routing algorithms;
geometric random graphs; wireless networks",
}
@Article{Rayanchu:2008:LAN,
author = "Shravan Rayanchu and Sayandeep Sen and Jianming Wu and
Suman Banerjee and Sudipta Sengupta",
title = "Loss-aware network coding for unicast wireless
sessions: design, implementation, and performance
evaluation",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "85--96",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375468",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Local network coding is growing in prominence as a
technique to facilitate greater capacity utilization in
multi-hop wireless networks. A specific objective of
such local network coding techniques has been to
explicitly minimize the total number of transmissions
needed to carry packets across each wireless hop. While
such a strategy is certainly useful, we argue that in
lossy wireless environments, a better use of local
network coding is to provide higher levels of
redundancy even at the cost of increasing the number of
transmissions required to communicate the same
information. In this paper we show that the design
space for effective redundancy in local network coding
is quite large, which makes optimal formulations of the
problem hard to realize in practice. We present a
detailed exploration of this design space and propose a
suite of algorithms, called CLONE, that can lead to
further throughput gains in multi-hop wireless
scenarios. Through careful analysis, simulations, and
detailed implementation on a real testbed, we show that
some of our simplest CLONE algorithms can be
efficiently implemented in today's wireless hardware to
provide a factor of two improvement in throughput for
example scenarios, while other, more effective, CLONE
algorithms require additional advances in hardware
processing speeds to be deployable in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "IEEE 802.11; network coding; wireless networks",
}
@Article{Schmid:2008:EMV,
author = "Thomas Schmid and Zainul Charbiwala and Jonathan
Friedman and Young H. Cho and Mani B. Srivastava",
title = "Exploiting manufacturing variations for compensating
environment-induced clock drift in time
synchronization",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "97--108",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375469",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Time synchronization is an essential service in
distributed computing and control systems. It is used
to enable tasks such as synchronized data sampling and
accurate time-of-flight estimation, which can be used
to locate nodes. The deviation in nodes' knowledge of
time and inter-node resynchronization rate are affected
by three sources of time stamping errors: network
wireless communication delays, platform hardware and
software delays, and environment-dependent frequency
drift characteristics of the clock source. The focus of
this work is on the last source of error, the clock
source, which becomes a bottleneck when either required
time accuracy or available energy budget and bandwidth
(and thus feasible resynchronization rate) are too
stringent. Traditionally, this has required the use of
expensive clock sources (such as temperature
compensation using precise sensors and calibration
models) that are not cost-effective in low-end wireless
sensor nodes. Since the frequency of a crystal is a
product of manufacturing and environmental parameters,
we describe an approach that exploits the subtle
manufacturing variation between a pair of inexpensive
oscillators placed in close proximity to
algorithmically compensate for the drift produced by
the environment. The algorithm effectively uses the
oscillators themselves as a sensor that can detect
changes in frequency caused by a variety of
environmental factors. We analyze the performance of
our approach using behavioral models of crystal
oscillators in our algorithm simulation. Then we apply
the algorithm to an actual temperature dataset
collected at the James Wildlife Reserve in Riverside
County, California, and test the algorithms on a
waveform generator based testbed. The results of our
experiments show that the technique can effectively
improve the frequency stability of an inexpensive
uncompensated crystal fivefold, with the potential for
even higher gains in future implementations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clocks; emulation; oscillator; time synchronization",
}
@Article{Cohen:2008:CEM,
author = "Edith Cohen and Nick Duffield and Carsten Lund and
Mikkel Thorup",
title = "Confident estimation for multistage measurement
sampling and aggregation",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "109--120",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375471",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurement, collection, and interpretation of network
usage data commonly involves multiple stage of sampling
and aggregation. Examples include sampling packets,
aggregating them into flow statistics at a router,
sampling and aggregation of usage records in a network
data repository for reporting, query and archiving.
Although unbiased estimates of packet, bytes and flows
usage can be formed for each sampling operation, for
many applications it is crucial to know the inherent
estimation error. Previous work in this area has been
limited mainly to analyzing the estimator variance for
particular methods, e.g., independent packet sampling.
However, the variance is of limited use for more
general sampling methods, where the estimate may not be
well approximated by a Gaussian distribution.\par
This motivates our paper, in which we establish
Chernoff bounds on the likelihood of estimation error
in a general multistage combination of measurement
sampling and aggregation. We derive the scale against
which errors are measured, in terms of the constituent
sampling and aggregation operations. In particular this
enables us to obtain rigorous confidence intervals
around any given estimate. We apply our method to a
number of sampling schemes both in the literature and
currently deployed, including sampling of packet
sampled NetFlow records, Sample and Hold, and Flow
Slicing. We obtain one particularly striking result in
the first case: that for a range of parameterizations,
packet sampling has no additional impact on the
estimator confidence derived from our bound, beyond
that already imposed by flow sampling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "confidence intervals; estimation; network measurement;
sampling",
}
@Article{Lu:2008:CBN,
author = "Yi Lu and Andrea Montanari and Balaji Prabhakar and
Sarang Dharmapurikar and Abdul Kabbani",
title = "Counter braids: a novel counter architecture for
per-flow measurement",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "121--132",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375472",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fine-grained network measurement requires routers and
switches to update large arrays of counters at very
high link speed (e.g. 40 Gbps). A naive algorithm needs
an infeasible amount of SRAM to store both the counters
and a flow-to-counter association rule, so that
arriving packets can update corresponding counters at
link speed. This has made accurate per-flow measurement
complex and expensive, and motivated approximate
methods that detect and measure only the large
flows.\par
This paper revisits the problem of accurate per-flow
measurement. We present a counter architecture, called
Counter Braids, inspired by sparse random graph codes.
In a nutshell, Counter Braids `compresses while
counting'. It solves the central problems (counter
space and flow-to-counter association) of per-flow
measurement by `braiding' a hierarchy of counters with
random graphs. Braiding results in drastic space
reduction by sharing counters among flows; and using
random graphs generated on-the-fly with hash functions
avoids the storage of flow-to-counter
association.\par
The Counter Braids architecture is optimal (albeit with
a complex decoder) as it achieves the maximum
compression rate asymptotically. For implementation, we
present a low-complexity message passing decoding
algorithm, which can recover flow sizes with
essentially zero error. Evaluation on Internet traces
demonstrates that almost all flow sizes are recovered
exactly with only a few bits of counter space per
flow.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "message passing algorithms; network measurement;
statistic counters",
}
@Article{Anandkumar:2008:TSB,
author = "Animashree Anandkumar and Chatschik Bisdikian and
Dakshi Agrawal",
title = "Tracking in a spaghetti bowl: monitoring transactions
using footprints",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "133--144",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375473",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of tracking end-to-end service-level
transactions in the absence of instrumentation support
is considered. The transaction instances progress
through a state-transition model and generate
time-stamped footprints on entering each state in the
model. The goal is to track individual transactions
using these footprints even when the footprints may not
contain any tokens uniquely identifying the transaction
instances that generated them. Assuming a semi-Markov
process model for state transitions, the transaction
instances are tracked probabilistically by matching
them to the available footprints according to the
maximum likelihood (ML) criterion. Under the ML-rule,
for a two-state system, it is shown that the
probability that all the instances are matched
correctly is minimized when the transition times are
i.i.d. exponentially distributed. When the transition
times are i.i.d. distributed, the ML-rule reduces to a
minimum weight bipartite matching and reduces further
to a first-in first-out match for a special class of
distributions. For a multi-state model with an acyclic
state transition digraph, a constructive proof shows
that the ML-rule reduces to splicing the results of
independent matching of many bipartite systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bipartite matching; maximum-likelihood tracking;
semi-Markov process; transaction monitoring",
}
@Article{Singhal:2008:OSS,
author = "Harsh Singhal and George Michailidis",
title = "Optimal sampling in state space models with
applications to network monitoring",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "145--156",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375474",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advances in networking technology have enabled network
engineers to use sampled data from routers to estimate
network flow volumes and track them over time. However,
low sampling rates result in large noise in traffic
volume estimates. We propose to combine data on
individual flows obtained from sampling with highly
aggregate data obtained from SNMP measurements (similar
to those used in network tomography) for the tracking
problem at hand. Specifically, we introduce a
linearized state space model for the estimation of
network traffic flow volumes from combined SNMP and
sampled data. Further, we formulate the problem of
obtaining optimal sampling rates under router resource
constraints as an experiment design problem.
Theoretically it corresponds to the problem of optimal
design for estimation of conditional means for state
space models and we present the associated convex
programs for a simple approach to it. The usefulness of
the approach in the context of network monitoring is
illustrated through an extensive numerical study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet traffic matrix estimation; Kalman filtering;
optimal design of experiments; state space models",
}
@Article{Ioannidis:2008:DHP,
author = "Stratis Ioannidis and Peter Marbach",
title = "On the design of hybrid peer-to-peer systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "157--168",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375476",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider hybrid peer-to-peer systems
where users form an unstructured peer-to-peer network
with the purpose of assisting a server in the
distribution of data. We present a mathematical model
that we use to analyze the scalability of hybrid
peer-to-peer systems under two query propagation
mechanisms: the random walk and the expanding ring. In
particular, we characterize how the query load at the
server, the load at peers as well as the query response
time scale as the number of users in the peer-to-peer
network increases. We show that, under a properly
designed random walk propagation mechanism, hybrid
peer-to-peer systems can support an unbounded number of
users while requiring only bounded resources both at
the server and at individual peers. This important
result shows that hybrid peer-to-peer systems have
excellent scalability properties. To the best of our
knowledge, this is the first time that a theoretical
study characterizing the scalability of such hybrid
peer-to-peer systems has been presented. We illustrate
our results through numerical studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "peer-to-peer; scalability",
}
@Article{Chen:2008:UMP,
author = "Minghua Chen and Miroslav Ponec and Sudipta Sengupta
and Jin Li and Philip A. Chou",
title = "Utility maximization in peer-to-peer systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "169--180",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375477",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study the problem of utility
maximization in P2P systems, in which aggregate
application-specific utilities are maximized by running
distributed algorithms on P2P nodes, which are
constrained by their uplink capacities. This may be
understood as extending Kelly's seminal framework from
single-path unicast over general topology to multi-path
multicast over P2P topology, with network coding
allowed. For certain classes of popular P2P topologies,
we show that routing along a linear number of trees per
source can achieve the largest rate region that can be
possibly obtained by (multi-source) network coding.
This simplification result allows us to develop a new
multi-tree routing formulation for the problem. Despite
the negative results in the literature on applying
Primal-dual algorithms to maximize utility under
multi-path settings, we have been able to develop a
Primal-dual distributed algorithm to maximize the
aggregate utility under the multi-path routing
environments. Utilizing our proposed sufficient
condition, we show global exponential convergence of
the Primal-dual algorithm to the optimal solution under
different P2P communication scenarios we study. The
algorithm can be implemented by utilizing only
end-to-end delay measurements between P2P nodes; hence,
it can be readily deployed on today's Internet. To
support this claim, we have implemented the Primal-dual
algorithm for use in a peer-assisted multi-party
conferencing system and evaluated its performance
through actual experiments on a LAN testbed and the
Internet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "content distribution; multi-party video conferencing;
multicast; peer-to-peer; streaming; utility
maximization",
}
@Article{Simatos:2008:QSM,
author = "Florian Simatos and Philippe Robert and Fabrice
Guillemin",
title = "A queueing system for modeling a file sharing
principle",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "181--192",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375478",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate in this paper the performance of a
simple file sharing principle. For this purpose, we
consider a system composed of N peers becoming active
at exponential random times; the system is initiated
with only one server offering the desired file and the
other peers, after becoming active, try to download it.
Once a peer has downloaded the file, it
immediately becomes a server. To investigate the
transient behavior of this file sharing system, we
study the instant when the system shifts from a
congested state where all servers available are
saturated by incoming demands to a state where a
growing number of servers are idle. In spite of its
apparent simplicity, this queueing model (with a random
number of servers) turns out to be quite difficult to
analyze. A formulation in terms of an urn and ball
model is proposed and corresponding scaling results are
derived. These asymptotic results are then compared
against simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file sharing; peer to peer; queueing systems;
transient analysis of Markov processes",
}
@Article{Goldberg:2008:PQM,
author = "Sharon Goldberg and David Xiao and Eran Tromer and
Boaz Barak and Jennifer Rexford",
title = "Path-quality monitoring in the presence of
adversaries",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "193--204",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375480",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Edge networks connected to the Internet need effective
monitoring techniques to drive routing decisions and
detect violations of Service Level Agreements (SLAs).
However, existing measurement tools, like ping,
traceroute, and trajectory sampling, are vulnerable to
attacks that can make a path look better than it really
is. In this paper, we design and analyze path-quality
monitoring protocols that reliably raise an alarm when
the packet-loss rate and delay exceed a threshold, even
when an adversary tries to bias monitoring results by
selectively delaying, dropping, modifying, injecting,
or preferentially treating packets.\par
Despite the strong threat model we consider in this
paper, our protocols are efficient enough to run at
line rate on high-speed routers. We present a secure
sketching protocol for identifying when packet loss and
delay degrade beyond a threshold. This protocol is
extremely lightweight, requiring only 250-600 bytes of
storage and periodic transmission of a comparably sized
IP packet to monitor billions of packets. We also
present secure sampling protocols that provide faster
feedback and accurate round-trip delay estimates, at
the expense of somewhat higher storage and
communication costs. We prove that all our protocols
satisfy a precise definition of secure path-quality
monitoring and derive analytic expressions for the
trade-off between statistical accuracy and system
overhead. We also compare how our protocols perform in
the client-server setting, when paths are asymmetric,
and when packet marking is not permitted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cryptography; path-quality monitoring; sampling;
sketching",
}
@Article{Pedarsani:2008:DAS,
author = "Pedram Pedarsani and Daniel R. Figueiredo and Matthias
Grossglauser",
title = "Densification arising from sampling fixed graphs",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "205--216",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375481",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During the past decade, a number of different studies
have identified several peculiar properties of networks
that arise from a diverse universe, ranging from social
to computer networks. A recently observed feature is
known as network densification, which occurs when the
number of edges grows much faster than the number of
nodes, as the network evolves over time. This
surprising phenomenon has been empirically validated in
a variety of networks that emerge in the real world and
mathematical models have been recently proposed to
explain it. Leveraging on how real data is usually
gathered and used, we propose a new model called Edge
Sampling to explain how densification can arise. Our
model is innovative, as we consider a fixed underlying
graph and a process that discovers this graph by
probabilistically sampling its edges. We show that this
model possesses several interesting features, in
particular, that edges and nodes discovered can exhibit
densification. Moreover, when the node degree of the
fixed underlying graph follows a heavy-tailed
distribution, we show that the Edge Sampling model can
yield power law densification, establishing an
approximate relationship between the degree exponent
and the densification exponent. The theoretical
findings are supported by numerical evaluations of the
model. Finally, we apply our model to real network data
to evaluate its performance on capturing the previously
observed densification. Our results indicate that edge
sampling is indeed a plausible alternative explanation
for the densification phenomenon that has been recently
observed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "densification; edge sampling; network modeling",
}
@Article{Oliveira:2008:SEG,
author = "Ricardo V. Oliveira and Dan Pei and Walter Willinger
and Beichuan Zhang and Lixia Zhang",
title = "In search of the elusive ground truth: the
{Internet}'s {AS}-level connectivity structure",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "217--228",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375482",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite significant efforts to obtain an accurate
picture of the Internet's actual connectivity structure
at the level of individual autonomous systems (ASes),
much has remained unknown in terms of the quality of
the inferred AS maps that have been widely used by the
research community. In this paper we assess the quality
of the inferred Internet maps through case studies of a
set of ASes. These case studies allow us to establish
the ground truth of AS-level Internet connectivity
between the set of ASes and their directly connected
neighbors. They also enable a direct comparison between
the ground truth and inferred topology maps and yield
new insights into questions such as which parts of the
actual topology are adequately captured by the inferred
maps, and which parts are missing and why. This
information is critical in assessing for what kinds of
real-world networking problems the use of currently
inferred AS maps or proposed AS topology models are, or
are not, appropriate. More importantly, our newly
gained insights also point to new directions towards
building realistic and economically viable Internet
topology maps.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; inter-domain routing; Internet topology",
}
@Article{Bao:2008:HPI,
author = "Yungang Bao and Mingyu Chen and Yuan Ruan and Li Liu
and Jianping Fan and Qingbo Yuan and Bo Song and
Jianwei Xu",
title = "{HMTT}: a platform independent full-system memory
trace monitoring system",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "229--240",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375484",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Memory trace analysis is an important technology for
architecture research, system software (i.e., OS,
compiler) optimization, and application performance
improvements. Many approaches have been used to track
memory trace, such as simulation, binary
instrumentation and hardware snooping. However, they
usually have limitations of time, accuracy and
capacity.\par
In this paper we propose a platform independent memory
trace monitoring system, which is able to track virtual
memory reference trace of full systems (including OS,
VMMs, libraries, and applications). The system adopts a
DIMM-snooping mechanism that uses hardware boards
plugged in DIMM slots to snoop. This approach has
several advantages: it is fast, complete,
undistorted, and portable. Three key techniques are
proposed to address the system design challenges with
this mechanism: (1) To keep up with memory speeds, the
DDR protocol state machine is simplified, and large
FIFOs are added between the state machine and the trace
transmitting logic to handle burst memory accesses; (2)
To reconstruct physical-to-virtual mapping and
distinguish one process' address space from others, an
OS kernel module, which collects page table
information, and a synchronization mechanism, which
synchronizes the page table information with the memory
trace, are developed; (3) To dump massive trace data, we
employ a straightforward method to compress the trace
and use Gigabit Ethernet and RAID to send and receive
the compressed trace.\par
We present our implementation of an initial monitoring
system, named HMTT (Hyper Memory Trace Tracker). Using
HMTT, we have observed that burst bandwidth utilization
is much larger than average bandwidth utilization, by
up to 5X in desktop applications. We have also
confirmed that the stream memory accesses of many
applications contribute more than 40\% of L2 cache
misses, and that OS virtual memory management may decrease
stream accesses as seen by the memory controller (or L2
cache) by up to 30.2\%. Moreover, we have evaluated OS
impact on memory performance in real systems. The
evaluations and case studies show the feasibility and
effectiveness of our proposed monitoring mechanism and
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "DIMM; HMTT; memory trace; real system",
}
@Article{Iliadis:2008:DSV,
author = "Ilias Iliadis and Robert Haas and Xiao-Yu Hu and
Evangelos Eleftheriou",
title = "Disk scrubbing versus intra-disk redundancy for
high-reliability {RAID} storage systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "241--252",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375485",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two schemes proposed to cope with unrecoverable or
latent media errors and enhance the reliability of RAID
systems are examined. The first scheme is the
established, widely used disk scrubbing scheme, which
operates by periodically accessing disk drives to
detect media-related unrecoverable errors. These errors
are subsequently corrected by rebuilding the sectors
affected. The second scheme is the recently proposed
intra-disk redundancy scheme, which uses a further level
of redundancy inside each disk, in addition to the RAID
redundancy across multiple disks. Analytic results are
obtained assuming Poisson arrivals of random I/O
requests. Our results demonstrate that the reliability
improvement due to disk scrubbing depends on the
scrubbing frequency and the workload of the system, and
may not reach the reliability level achieved by a
simple IPC-based intra-disk redundancy scheme, which is
insensitive to the workload. In fact, the IPC-based
intra-disk redundancy scheme achieves essentially the
same reliability as that of a system operating without
unrecoverable sector errors. For heavy workloads, the
reliability achieved by the scrubbing scheme can be
orders of magnitude less than that of the intra-disk
redundancy scheme.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "MTTDL; RAID; reliability analysis; stochastic
modeling; unrecoverable or latent sector errors",
}
@Article{Thereska:2008:IRP,
author = "Eno Thereska and Gregory R. Ganger",
title = "{Ironmodel}: robust performance models in the wild",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "253--264",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375486",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditional performance models are too brittle to be
relied on for continuous capacity planning and
performance debugging in many computer systems. Simply
put, a brittle model is often inaccurate and incorrect.
We find two types of reasons why a model's prediction
might diverge from reality: (1) the underlying
system might be misconfigured or buggy or (2) the
model's assumptions might be incorrect. The extra
effort of manually finding and fixing the source of
these discrepancies, continuously, in both the system
and model, is one reason why many system designers and
administrators avoid using mathematical models
altogether. Instead, they opt for simple, but often
inaccurate, `rules-of-thumb'.\par
This paper describes IRONModel, a robust performance
modeling architecture. Through studying performance
anomalies encountered in an experimental cluster-based
storage system, we analyze why and how models and
actual system implementations get out-of-sync. Lessons
learned from that study are incorporated into
IRONModel. IRONModel leverages the redundancy of
high-level system specifications described through
models and low-level system implementation to localize
many types of system-model inconsistencies. IRONModel
can guide designers to the potential source of the
discrepancy, and, if appropriate, can
semi-automatically evolve the models to handle
unanticipated inputs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active probing; behavioral modeling; management;
what-if",
}
@Article{Liu:2008:XFS,
author = "Alex X. Liu and Fei Chen and JeeHyun Hwang and Tao
Xie",
title = "{Xengine}: a fast and scalable {XACML} policy
evaluation engine",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "265--276",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375488",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "XACML has become the de facto standard for specifying
access control policies for various applications,
especially web services. With the explosive growth of
web applications deployed on the Internet, XACML
policies grow rapidly in size and complexity, which
leads to longer request processing time. This paper
concerns the performance of request processing, which
is a critical issue and so far has been overlooked by
the research community. In this paper, we propose
XEngine, a scheme for efficient XACML policy
evaluation. XEngine first converts a textual XACML
policy to a numerical policy. Second, it converts a
numerical policy with complex structures to a numerical
policy with a normalized structure. Third, it converts
the normalized numerical policy to tree data structures
for efficient processing of requests. To evaluate the
performance of XEngine, we conducted extensive
experiments on both real-life and synthetic XACML
policies. The experimental results show that XEngine is
orders of magnitude more efficient than Sun PDP, and
the performance difference between XEngine and Sun PDP
grows almost linearly with the number of rules in XACML
policies. For XACML policies of small sizes (with
hundreds of rules), XEngine is one to two orders of
magnitude faster than the widely deployed Sun PDP. For
XACML policies of large sizes (with thousands of
rules), XEngine is three to four orders of magnitude
faster than Sun PDP.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "access control; policy decision point (PDP); policy
enforcement point (PEP); policy evaluation; web server;
XACML",
}
@Article{Traeger:2008:DDA,
author = "Avishay Traeger and Ivan Deras and Erez Zadok",
title = "{DARC}: dynamic analysis of root causes of latency
distributions",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "277--288",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375489",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "OSprof is a versatile, portable, and efficient
profiling methodology based on the analysis of latency
distributions. Although OSprof offers several
unique benefits and has been used to uncover several
interesting performance problems, the latency
distributions that it provides must be analyzed
manually. These latency distributions are presented as
histograms and contain distinct groups of data, called
peaks, that characterize the overall behavior of the
running code. By automating the analysis process, we
make it easier to take advantage of OSprof's unique
features.\par
We have developed the Dynamic Analysis of Root Causes
system (DARC), which finds root cause paths in a
running program's call-graph using runtime latency
analysis. A root cause path is a call-path that starts
at a given function and includes the largest latency
contributors to a given peak. These paths are the main
causes for the high-level behavior that is represented
as a peak in an OSprof histogram. DARC performs PID and
call-path filtering to reduce overheads and
perturbations, and can handle recursive and indirect
calls. DARC can analyze preemptive behavior and
asynchronous call-paths, and can also resume its
analysis from a previous state, which is useful when
analyzing short-running programs or specific phases of
a program's execution.\par
We present DARC and show its usefulness by analyzing
behaviors that were observed in several interesting
scenarios. We also show that DARC has negligible
elapsed time overheads for normal use cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dynamic instrumentation; root cause",
}
@Article{Chaitanya:2008:QQM,
author = "Shiva Chaitanya and Bhuvan Urgaonkar and Anand
Sivasubramaniam",
title = "{QDSL}: a queuing model for systems with differential
service levels",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "289--300",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375490",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A feature exhibited by many modern computing systems
is their ability to improve the quality of output they
generate for a given input by spending more computing
resources on processing it. Often this improvement
comes at the price of degraded performance in the form
of reduced throughput or increased response time. We
formulate QDSL, a class of constrained optimization
problems defined in the context of a queueing server
equipped with multiple levels of service. Solutions to
QDSL provide rules for dynamically varying the service
level to achieve desired trade-offs between output
quality and performance. Our approach involves reducing
restricted versions of such systems to Markov Decision
Processes. We find two variants of such systems worth
studying: (i) VarSL, in which a single request may be
serviced using a combination of multiple levels during
its lifetime and (ii) FixSL in which the service level
may not change during the lifetime of a request. Our
modeling indicates that optimal service level selection
policies in these systems correspond to very simple
rules that can be implemented very efficiently in
realistic, online systems. We find our policies to be
useful in two response-time-sensitive real-world
systems: (i) qSecStore, an iSCSI-based secure storage
system that has access to multiple encryption
functions, and (ii) qPowServer, a server with a
DVFS-capable processor. As a representative result, in
an instance of qSecStore serving disk requests derived
from the well-regarded TPC-H traces, we are able to
improve the fraction of requests using more reliable
encryption functions by 40-60\%, while meeting
performance targets. In a simulation of qPowServer
employing realistic DVFS parameters, we are able to
improve response times significantly while only
violating specified server-wide power budgets by less
than 5W.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "differential service levels; dynamic voltage frequency
scaling; Markov decision process; secure storage",
}
@Article{Parvez:2008:ABL,
author = "Nadim Parvez and Carey Williamson and Anirban Mahanti
and Niklas Carlsson",
title = "Analysis of {BitTorrent}-like protocols for on-demand
stored media streaming",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "301--312",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375492",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper develops analytic models that characterize
the behavior of on-demand stored media content delivery
using BitTorrent-like protocols. The models capture the
effects of different piece selection policies,
including Rarest-First and two variants of In-Order.
Our models provide insight into transient and
steady-state system behavior, and help explain the
sluggishness of the system with strict In-Order
streaming. We use the models to compare different
retrieval policies across a wide range of system
parameters, including peer arrival rate,
upload/download bandwidth, and seed residence time. We
also provide quantitative results on the startup delays
and retrieval times for streaming media delivery. Our
results provide insights into the optimal design of
peer-to-peer networks for on-demand media streaming.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BitTorrent; on-demand streaming; peer-to-peer
systems",
}
@Article{Liu:2008:PBP,
author = "Shao Liu and Rui Zhang-Shen and Wenjie Jiang and
Jennifer Rexford and Mung Chiang",
title = "Performance bounds for peer-assisted live streaming",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "313--324",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375493",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Peer-assisted streaming is a promising way for service
providers to offer high-quality IPTV to consumers at
reasonable cost. In peer-assisted streaming, the peers
exchange video chunks with one another, and receive
additional data from the central server as needed. In
this paper, we analyze how to provision resources for
the streaming system, in terms of the server capacity,
the video quality, and the depth of the distribution
trees that deliver the content. We derive the
performance bounds for minimum server load, maximum
streaming rate, and minimum tree depth under different
peer selection constraints. Furthermore, we show that
our performance bounds are actually tight, by
presenting algorithms for constructing trees that
achieve our bounds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "IPTV; peer-to-peer; streaming; tree construction;
video",
}
@Article{Bonald:2008:ELS,
author = "Thomas Bonald and Laurent Massouli{\'e} and Fabien
Mathieu and Diego Perino and Andrew Twigg",
title = "Epidemic live streaming: optimal performance
trade-offs",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "325--336",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375494",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several peer-to-peer systems for live streaming have
been recently deployed (e.g. CoolStreaming, PPLive,
SopCast). These all rely on distributed, epidemic-style
dissemination mechanisms. Despite their popularity, the
fundamental performance trade-offs of such mechanisms
are still poorly understood. In this paper we propose
several results that contribute to the understanding of
such trade-offs.\par
Specifically, we prove that the so-called random peer,
latest useful chunk mechanism can achieve dissemination
at an optimal rate and within an optimal delay, up to
an additive constant term. This qualitative result
suggests that epidemic live streaming algorithms can
achieve near-unbeatable rates and delays. Using
mean-field approximations, we also derive recursive
formulas for the diffusion function of two schemes
referred to as latest blind chunk, random peer and
latest blind chunk, random useful peer.\par
Finally, we provide simulation results that validate
the above theoretical results and allow us to compare
the performance of various practically interesting
diffusion schemes in terms of delay, rate, and control
overhead. In particular, we identify several peer/chunk
selection algorithms that achieve near-optimal
performance trade-offs. Moreover, we show that the
control overhead needed to implement these algorithms
may be reduced by restricting the neighborhood of each
peer without substantial performance degradation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "delay optimality; epidemic diffusion; p2p live
streaming",
}
@Article{Lin:2008:STM,
author = "Jiang Lin and Hongzhong Zheng and Zhichun Zhu and
Eugene Gorbatov and Howard David and Zhao Zhang",
title = "Software thermal management of {DRAM} memory for
multicore systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "337--348",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375496",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Thermal management of DRAM memory has become a
critical issue for server systems. To the best of our
knowledge, we have done the first study of software thermal
management for the memory subsystem on real machines. Two
recently proposed DTM (Dynamic Thermal Management)
policies have been improved and implemented in Linux OS
and evaluated on two multicore servers, a Dell
PowerEdge 1950 server and a customized Intel SR1500AL
server testbed. The experimental results first confirm
that a system-level memory DTM policy may significantly
improve system performance and power efficiency,
compared with existing memory bandwidth throttling
scheme. A policy called DTM-ACG (Adaptive Core Gating)
shows performance improvement comparable to that
reported previously. The average performance
improvements are 13.3\% and 7.2\% on the PowerEdge 1950
and the SR1500AL (vs. 16.3\% from the previous
simulation-based study), respectively. We also have
surprising findings that reveal the weakness of the
previous study: the CPU heat dissipation and its impact
on DRAM memories, which were ignored, are significant
factors. We have observed that the second policy,
called DTM-CDVFS (Coordinated Dynamic Voltage and
Frequency Scaling), has much better performance than
previously reported for this reason. The average
improvements are 10.8\% and 15.3\% on the two machines
(vs. 3.4\% from the previous study), respectively. It
also significantly reduces the processor power by
15.5\% and energy by 22.7\% on average.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "DRAM memories; thermal management",
}
@Article{Menache:2008:NPC,
author = "Ishai Menache and Nahum Shimkin",
title = "Noncooperative power control and transmission
scheduling in wireless collision channels",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "349--358",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375497",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a wireless collision channel, shared by a
finite number of mobile users who transmit to a common
base station using a random access protocol. Mobiles
are self-optimizing, and wish to minimize their
individual average power investment subject to
minimum-throughput demand. The channel state between
each mobile and the base station is stochastically
time-varying and is observed by the mobile prior to
transmission. Given the current channel state, a mobile
may decide whether to transmit or not, and to determine
the transmission power in case of transmission. In this
paper, we investigate the properties of the Nash
equilibrium of the resulting game in multiuser
networks.\par
We characterize the best-response strategy of the
mobile and show that it leads to a `water-filling'-like
power allocation. Our equilibrium analysis then reveals
that one of the possible equilibria is uniformly best
for all mobiles. Furthermore, this equilibrium can be
reached by a simple distributed mechanism that does not
require specific information on other mobiles' actions.
We then explore some additional characteristics of the
distributed power control framework. Braess-like
paradoxes are reported, where the use of multiple power
levels can diminish system capacity and also lead to
larger per-user power consumption, compared to the case
where a single level only is permitted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel state information; non-cooperative multiple
access; power efficient Nash equilibrium; uplink
collision channel; water-filling power allocation",
}
@Article{Kandemir:2008:SDC,
author = "Mahmut Kandemir and Ozcan Ozturk",
title = "Software-directed combined {CPU}\slash link voltage
scaling for {NoC}-based {CMPs}",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "359--370",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375498",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network-on-Chip (NoC) based chip multiprocessors
(CMPs) are expected to become more widespread in the
future, in both high-performance scientific computing
and low-end embedded computing. For many execution
environments that employ these systems, reducing power
consumption is an important goal. This paper presents a
software approach for reducing power consumption in
such systems through compiler-directed
voltage/frequency scaling. The unique characteristic of
this approach is that it scales the voltages and
frequencies of select CPUs and communication links in a
coordinated manner to maximize energy savings without
degrading performance. Our approach has three important
components. The first component is the identification
of phases in the application. The next step is to
determine the critical execution paths and slacks in
each phase. For implementing these two components, our
approach employs a novel parallel program
representation. The last component of our approach is
the assignment of voltages and frequencies to CPUs and
communication links to maximize energy savings. We use
integer linear programming (ILP) for this
voltage/frequency assignment problem. To test our
approach, we implemented it within a compilation
framework and conducted experiments with applications
from the SPEComp suite and SPECjbb. Our results show
that the proposed combined CPU/link scaling is much
more effective than scaling voltages of CPUs or
communication links in isolation. In addition, we
observed that the energy savings obtained are
consistent across a wide range of values of our major
simulation parameters such as the number of CPUs, the
number of voltage/frequency levels, and the
thread-to-CPU mapping.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CMP; communication link; compiler; cpu; NoC; voltage
scaling",
}
@Article{Crk:2008:IAE,
author = "Igor Crk and Mingsong Bi and Chris Gniady",
title = "Interaction-aware energy management for wireless
network cards",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "371--382",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375499",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wireless Network Interface Cards (WNICs) are part of
every portable device, where efficient energy
management plays a significant role in extending the
device's battery life. The goal of efficient energy
management is to match the performance of the WNIC to
the network activity shaped by a running application.
In the case of interactive applications on mobile
systems, network I/O is largely driven by user
interactions. Current solutions either require
application modifications or lack a sufficient context
of execution that is crucial in making accurate and
timely predictions. This paper proposes a range of
user-interaction-aware mechanisms that utilize a novel
approach of monitoring a user's interaction with
applications through the capture and classification of
mouse events. This approach yields considerable
improvements in energy savings and delay reductions of
the WNIC, while significantly improving the accuracy,
timeliness, and computational overhead of predictions
when compared to existing state-of-the-art solutions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "context-awareness; energy management; interaction
monitoring; prediction; resource management; wireless
network cards",
}
@Article{Stanojevic:2008:FDE,
author = "Rade Stanojevi{\'c} and Robert Shorten",
title = "Fully decentralized emulation of best-effort and
processor sharing queues",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "383--394",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375501",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Control of large distributed cloud-based services is a
challenging problem. The Distributed Rate Limiting
(DRL) paradigm was recently proposed as a mechanism for
tackling this problem. The heuristic nature of existing
DRL solutions makes their behavior unpredictable and
analytically intractable. In this paper we treat the
DRL problem in a mathematical framework and propose two
novel DRL algorithms that exhibit good and predictable
performance. The first algorithm Cloud Control with
Constant Probabilities (C3P) solves the DRL problem in
best effort environments, emulating the behavior of a
single best-effort queue in a fully distributed manner.
The second problem we approach is the DRL in processor
sharing environments. Our algorithm, Distributed
Deficit Round Robin (D2R2), parameterized by a parameter
$ \alpha $, converges to a state that is, at most, $
O(1 / \alpha) $ away from the exact emulation of a
centralized processor sharing queue. The convergence
and stability properties are fully analyzed for both
C3P and D2R2. Analytical results are validated
empirically through a number of representative packet
level simulations. The closed-form nature of our
results allows simple design rules which, together with
extremely low communication overhead, makes the
presented algorithms practical and easy to deploy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CDN; cloud control; consensus agreement; rate
limiting; stability and convergence",
}
@Article{Jagabathula:2008:ODS,
author = "Srikanth Jagabathula and Devavrat Shah",
title = "Optimal delay scheduling in networks with arbitrary
constraints",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "395--406",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375502",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of designing an online
scheduling scheme for a multi-hop wireless packet
network with arbitrary topology and operating under
arbitrary scheduling constraints. The objective is to
design a scheme that achieves high throughput and low
delay simultaneously. We propose a scheduling scheme
that --- for networks operating under primary
interference constraints --- guarantees a per-flow
end-to-end packet delay bound of $ 5 d_j / (1 -
\rho_j) $, at a factor 5 loss of throughput, where $
d_j$ is the path length (number of hops) of flow $j$
and $ \rho_j$ is the effective loading along the route
of flow $j$. Clearly, $ d_j$ is a universal lower bound
on end-to-end packet delay for flow $j$. Thus, our
result is essentially optimal. To the best of our
knowledge, our result is the first one to show that it
is possible to achieve a per-flow end-to-end delay
bound of $ O(\mbox{\# of hops}) $ in a constrained
network.\par
Designing such a scheme comprises two related
subproblems: Global Scheduling and Local Scheduling.
Global Scheduling involves determining the set of links
that will be simultaneously active without violating
the scheduling constraints, while Local Scheduling
involves determining the packets that will be
transferred across active edges. We design a local
scheduling scheme by adapting the Preemptive
Last-In-First-Out (PL) scheme, applied for
quasi-reversible continuous time networks, to an
unconstrained discrete-time network. A global
scheduling scheme is then obtained by using stable
marriage algorithms to emulate the unconstrained
network with the constrained wireless network.\par
Our scheme can be easily extended to a network
operating under general scheduling constraints, such as
secondary interference constraints, with the same delay
bound and a loss of throughput that depends on
scheduling constraints through an intriguing `sub-graph
covering' property.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "delay; scheduling algorithm; throughput",
}
@Article{Jung:2008:RSL,
author = "Kyomin Jung and Yingdong Lu and Devavrat Shah and
Mayank Sharma and Mark S. Squillante",
title = "Revisiting stochastic loss networks: structures and
algorithms",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "407--418",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375503",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers structural and algorithmic
problems in stochastic loss networks. The very popular
Erlang approximation can be shown to provide relatively
poor performance estimates, especially for loss
networks in the critically loaded regime. This paper
proposes a novel algorithm for estimating the
stationary loss probabilities in stochastic loss
networks based on structural properties of the exact
stationary distribution, which is shown to always
converge, exponentially fast, to the asymptotically
exact results. Using a variational characterization of
the stationary distribution, an alternative proof is
provided for an important result due to Kelly, which is
simpler and may be of interest in its own right. This
paper also determines structural properties of the
inverse Erlang function characterizing the region of
capacities that ensures offered traffic is served
within a set of loss probabilities. Numerical
experiments investigate various issues of both
theoretical and practical interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Erlang loss formula and fixed-point approximation;
loss networks; multidimensional stochastic processes;
stochastic approximations",
}
@Article{Bonald:2008:TCM,
author = "Thomas Bonald and Ali Ibrahim and James Roberts",
title = "Traffic capacity of multi-cell {WLANS}",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "419--430",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375504",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance of WLANs has been extensively studied
during the past few years. While the focus has mostly
been on isolated cells, the coverage of WLANs is in
practice most often realised through several cells.
Cells using the same frequency channel typically
interact through the exclusion region enforced by the
RTS/CTS mechanism prior to the transmission of any
packet.\par
In this paper, we investigate the impact of this
interaction on the overall network capacity under
realistic dynamic traffic conditions. Specifically, we
represent each cell as a queue and derive the stability
condition of the corresponding coupled queuing system.
This condition is then used to calculate the network
capacity. To gain insight into the particular nature of
interference in multi-cell WLANs, we apply our model to
a number of simple network topologies and explicitly
derive the capacity in several cases. The results
notably show that the capacity gain obtained by using M
frequency channels can grow significantly faster than
M, the rate one might intuitively expect. In addition
to stability results, we present an approximate model
to derive the impact of network load on the mean
transfer rate seen by the users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "capacity; flow-level model; IEEE 802.11; multi-cell
WLAN; stability",
}
@Article{Reineke:2008:RCC,
author = "Jan Reineke and Daniel Grund",
title = "Relative competitiveness of cache replacement
policies",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "431--432",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375506",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache performance; predictability; replacement policy;
WCET analysis; worst-case execution time",
}
@Article{Wen:2008:NDE,
author = "Zhihua Wen and Michael Rabinovich",
title = "Network distance estimation with dynamic landmark
triangles",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "433--434",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375507",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an efficient and accurate
approach to estimate the network distance between
arbitrary Internet hosts. We use three landmark hosts
forming a triangle in two-dimensional space to estimate
the distance between arbitrary hosts with simple
trigonometric calculations. To improve the accuracy of
estimation, we dynamically choose the `best' triangle
for a given pair of hosts using a heuristic algorithm.
Our experiments show that this approach achieves both
lower computational and network probing costs than the
classic landmarks-based approach while producing more
accurate estimates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network distance estimation",
}
@Article{Yuksel:2008:CSI,
author = "Murat Yuksel and Kadangode K. Ramakrishnan and
Shivkumar Kalyanaraman and Joseph D. Houle and Rita
Sadhvani",
title = "Class-of-service in {IP} {backbones}: informing the
network neutrality debate",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "435--436",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375508",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The benefit of Class-of-Service (CoS) is an important
topic in the `Network Neutrality' debate. Proponents of
network neutrality suggest that over-provisioning is a
viable alternative to CoS. We quantify the extra
capacity requirement for an over-provisioned classless
(i.e., best-effort) network compared to a CoS network
providing the same delay or loss performance for
premium traffic. We first develop a link model that
quantifies this Required Extra Capacity (REC). For
bursty and realistic traffic distributions, we find the
REC using ns-2 simulation comparisons of the CoS and
classless link cases. We use these link models to
quantify the REC for realistic network topologies. We
show that REC can be significant even when the
proportion of premium traffic is small, a situation
often considered benign for the over-provisioning
alternative.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "class-of-service; economics; network neutrality;
performance",
}
@Article{Dreger:2008:PRC,
author = "Holger Dreger and Anja Feldmann and Vern Paxson and
Robin Sommer",
title = "Predicting the resource consumption of network
intrusion detection systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "437--438",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375509",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When installing network intrusion detection systems
(NIDSs), operators are faced with a large number of
parameters and analysis options for tuning trade-offs
between detection accuracy and resource
requirements. In this work we set out to assist this
process by understanding and predicting the CPU and
memory consumption of such systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "NIDS; performance model",
}
@Article{Li:2008:EMA,
author = "Bin Li and Lu Peng and Balachandran Ramadass",
title = "Efficient {MART}-aided modeling for microarchitecture
design space exploration and performance prediction",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "439--440",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375510",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer architects usually evaluate new designs by
cycle-accurate processor simulation. This approach
provides detailed insight into processor performance,
power consumption and complexity. However, only
configurations in a subspace can be simulated in
practice due to long simulation time and limited
resources, leading to suboptimal conclusions which might
not apply to a larger design space. In this paper,
we propose an automated performance prediction approach
which employs state-of-the-art techniques from
experiment design, machine learning and data mining.
Our method not only produces highly accurate
estimations for unsampled points in the design space,
but also provides interpretation tools that help
investigators to understand performance bottlenecks.
According to our experiments, by sampling only 0.02\%
of the full design space with about 15 million points,
the median percentage errors, based on 5000 independent
test points, range from 0.32\% to 3.12\% in 12
benchmarks. Even for the worst-case performance, the
percentage errors are within 7\% for 10 out of 12
benchmarks. In addition, the proposed model can also
help architects to find important design parameters and
performance bottlenecks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "design space exploration; MART-aided models; Multiple
Additive Regression Trees (MART); performance
prediction",
}
@Article{Balon:2008:CII,
author = "Simon Balon and Guy Leduc",
title = "Combined intra- and inter-domain traffic engineering
using hot-potato aware link weights optimization",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "441--442",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375511",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A well-known approach to intradomain traffic
engineering consists in finding the set of link weights
that minimizes a network-wide objective function for a
given intradomain traffic matrix. This approach is
inadequate because it ignores a potential impact on
interdomain routing due to hot-potato routing policies.
This may result in changes in the intradomain traffic
matrix that have not been anticipated by the link
weights optimizer, possibly leading to degraded network
performance.\par
We propose a BGP-aware link weights optimization method
that takes these hot-potato effects into account. This
method uses the interdomain traffic matrix and other
available BGP data, to extend the intradomain topology
with external virtual nodes and links, on which all the
well-tuned heuristics of a classical link weights
optimizer can be applied. Our method can also optimize
the traffic on the interdomain peering links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; hot-potato routing; IGP; OSPF; traffic
engineering",
}
@Article{Anderson:2008:MDW,
author = "Eric W. Anderson and Caleb T. Phillips and Kevin S.
Bauer and Dirk C. Grunwald and Douglas C. Sicker",
title = "Modeling directionality in wireless networks: extended
abstract",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "443--444",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375512",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The physical-layer models commonly used in current
networking research only minimally address the
interaction of directional antennas and radio
propagation. This paper compares the models found in
popular simulation tools with measurements taken across
a variety of links in multiple environments. We find
that the effects of antenna direction are significantly
different from the models used by the common wireless
network simulators. We propose a parametric model which
better captures the effects of different propagation
environments on directional antenna systems. We believe
that adopting this model will allow more realistic
simulation of protocols relying on directional
antennas, supporting better design and more valid
assessment of those protocols.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "antenna; directional; modeling; networking;
propagation; wireless",
}
@Article{Bremler-Barr:2008:LIC,
author = "Anat Bremler-Barr and David Hay and Danny Hendler and
Boris Farber",
title = "Layered interval codes for {TCAM}-based
classification",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "445--446",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375513",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "classification; TCAM",
}
@Article{Ramabhadran:2008:DRD,
author = "Sriram Ramabhadran and Joseph Pasquale",
title = "Durability of replicated distributed storage systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "447--448",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375514",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the problem of guaranteeing data durability
[2] in distributed storage systems based on
replication. Our work is motivated by several recent
efforts [3, 5, 1] to build such systems in a
peer-to-peer environment. The key features of this
environment which make achieving durability difficult
are (1) data lifetimes may be several orders of
magnitude larger than the lifetimes of individual
storage units, and (2) the system may have little or no
control over the participation of these storage units
in the system. We use a model-based approach to develop
engineering principles for designing automated
replication and repair mechanisms to implement
durability in such systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "durability; replication",
}
@Article{Li:2008:IEM,
author = "Feihui Li and Mahmut Kandemir and Mary J. Irwin",
title = "Implementation and evaluation of a migration-based
{NUCA} design for chip multiprocessors",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "449--450",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375515",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Chip Multiprocessors (CMPs) and Non-Uniform Cache
Architectures (NUCAs) represent two emerging trends in
computer architecture. Targeting future CMP based
systems with NUCA type L2 caches, this paper proposes a
novel data migration algorithm for parallel
applications and evaluates it. The goal of this
migration scheme is to determine a suitable location
for each data block within a large L2 space at any
given point during execution. A unique characteristic
of the proposed scheme is that it models the problem of
optimal data placement in the L2 cache space as a two
dimensional post office placement problem, presents a
practical architectural implementation of this model,
and gives an evaluation of the proposed
implementation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CMP; data migration; NUCA; post office placement
problem",
}
@Article{Alouf:2008:MGQ,
author = "Sara Alouf and Eitan Altman and Amar Prakash Azad",
title = "{M/G/1} queue with repeated inhomogeneous vacations
applied to {IEEE 802.16e} power saving",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "451--452",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375516",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "constrained optimization; M/G/1 queue with repeated
inhomogeneous vacations; numerical analysis; power save
mode; system response time",
}
@Article{Seetharaman:2008:MID,
author = "Srinivasan Seetharaman and Mostafa H. Ammar",
title = "Managing inter-domain traffic in the presence of
{BitTorrent} file-sharing",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "453--454",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375517",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Overlay routing operating in a selfish manner is known
to cause undesired instability when it interacts with
native layer routing. We observe similar selfish
behavior with the BitTorrent protocol, where its
performance-awareness causes it to constantly alter the
routing decisions (peer and piece selection). This
causes fluctuations in the load experienced by the
underlying native network. By using real BitTorrent
traces and a comprehensive simulation with different
network characteristics, we show that BitTorrent
systems easily disrupt the load balance across
inter-domain links. Further, we find that existing
native layer traffic management schemes suffer from
several downsides and are not conducive to deployment.
To resolve this dilemma, we propose two BitTorrent
strategies that are effective in resolving the
cross-layer conflict.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BitTorrent; conflict; contention; cross-layer; traffic
engineering; traffic management",
}
@Article{Mota-Garcia:2008:COE,
author = "Edmar Mota-Garcia and Rogelio Hasimoto-Beltran",
title = "Clock offset estimation using collaborative one-way
transit time",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "455--456",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375518",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new collaborative clock offset estimation
scheme between two nodes in the Internet using
independent one-way offset estimations. Our proposal
(different than current schemes in the literature) is
intended to provide a fast and accurate clock offset
estimation in approximately [Round-Trip Time
(RTT)+40]ms. The scheme sends a group of 5 probes in
the forward and reverse paths, and models the One-way
Transit Time (OTT) by a Gamma distribution (with
parameters adapted to actual path condition) to
estimate the minimum distribution value (or long-term
minimum OTT value). End nodes exchange their
corresponding minimum distribution values to get an
improved final clock offset estimate, which takes into
account the network path asymmetries. We show that our
scheme provides a faster clock offset estimation with
lower RMSE and superior stability than NTP and current
NTP-like state-of-the-art methodologies in the
literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clock offset estimation; one-way transit time",
}
@Article{Gupta:2008:SQL,
author = "Gagan R. Gupta and Ness B. Shroff",
title = "Scheduling with queue length guarantees for shared
resource systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "457--458",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375519",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop a class of schemes called GMWM that
guarantee optimal throughput for queuing systems with
arbitrary constraints on the set of jobs that can be
served simultaneously. We obtain an analytical upper
bound on the expected queue length. To further tighten
the upper bound, we formulate it as a convex
optimization problem. We also show that whenever the
arrival process is stabilizable, the scheme is
guaranteed to achieve an expected queue length that is
no larger than the expected queue length of any
stationary randomized policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Lyapunov theory; scheduling",
}
@Article{Chen:2008:ECD,
author = "Aiyou Chen and Li Li and Jin Cao",
title = "Estimating cardinality distributions in network
traffic: extended abstract",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "459--460",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375520",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Information on network host connectivity patterns are
important for network monitoring and traffic
engineering. In this paper, an efficient streaming
algorithm is proposed to estimate cardinality
distributions including connectivity distributions,
e.g. percent of hosts with any given number of distinct
communicating peers or flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cardinality distribution; streaming algorithm",
}
@Article{Grit:2008:WFS,
author = "Laura E. Grit and Jeffrey S. Chase",
title = "Weighted fair sharing for dynamic virtual clusters",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "461--462",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375521",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a shared server infrastructure, a scheduler
controls how quantities of resources are shared over
time in a fair manner across multiple, competing
consumers. It should support wide (parallel) requests
for variable-sized pool of resources, provide assurance
of minimum resource allotment on demand, and give
predictable assignments. Our approach integrates a fair
queuing algorithm with a calendar scheduler. We present
WINKS, a proportional share allocation policy that
addresses the needs of shared server environments. It
extends start-time fair queuing to support wide
requests with backfill, advance reservations, dynamic
cluster sizing, dynamic request sizing, and intra-flow
request prioritization. It also preserves fairness
properties across queue transformations and calendar
operations needed to implement these extensions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cluster computing; fair sharing; proportional sharing;
virtual computing; weighted fair queuing",
}
@Article{Sundaram:2008:ETF,
author = "Vasumathi Sundaram and Abhishek Chandra and Jon
Weissman",
title = "Exploring the throughput-fairness tradeoff of deadline
scheduling in heterogeneous computing environments",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "463--464",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375522",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The scalability and computing power of large-scale
computational platforms have made them attractive for
hosting compute-intensive time-critical applications.
Many of these applications are composed of
computational tasks that require specific deadlines to
be met for successful completion. In this paper, we
show that combining redundant scheduling with
deadline-based scheduling in these systems leads to a
fundamental tradeoff between throughput and fairness.
We propose a new scheduling algorithm called Limited
Resource Earliest Deadline (LRED) that couples
redundant scheduling with deadline-driven scheduling in
a flexible way by using a simple tunable parameter to
exploit this tradeoff. Our evaluation of LRED shows
that LRED provides a powerful mechanism to achieve
desired throughput or fairness under high loads and low
timeliness environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "deadline; fairness; throughput",
}
@Article{Papp:2008:CMV,
author = "Gabor Papp and Chris GauthierDickey",
title = "Characterizing multiparty voice communication for
multiplayer games",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "465--466",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375523",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Over the last few years, the number of game players
using voice communication to talk to each other while
playing games has increased dramatically. In fact, many
modern games and game consoles have added voice support
instead of expecting third-party companies to provide
this technology. Unlike traditional voice-over-IP
technology, where most conversations are between two
people, voice communication in games often has 5 or
more people talking together as they play.\par
We present the first measurement study on the
characteristics of multiparty voice communications.
Over a 3 month period, we measured over 7,000 sessions
on an active multi-party voice communication server to
quantify the characteristics of communication generated
by game players, including overall server traffic,
group sizes, session characteristics, and speaking
(and silence) durations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer games; silence periods; talkspurts; voice
communication",
}
@Article{Meiners:2008:AAR,
author = "Chad R. Meiners and Alex X. Liu and Eric Torng",
title = "Algorithmic approaches to redesigning {TCAM}-based
systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "467--468",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375524",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet classification; pipeline; range expansion;
TCAM",
}
@Article{Douceur:2008:PAR,
author = "John R. Douceur",
title = "Performance analysis in the real world",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "469--470",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375526",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "What issues are on the minds of industrial performance
analysts? Four representatives of world-class product
organizations will describe their work at the front
lines of measurement, modeling, and performance tuning.
Topics will include performance engineering of
middleware at IBM, tools for detecting false sharing in
large-scale multiprocessors at Hewlett--Packard, kernel
thread-scheduling performance in multiprocessors at
Microsoft, and low-overhead instrumentation for
profiling large-scale services at Google. Plenty of
time will be available to ask questions about how to
direct our research to have the greatest impact on
industrial practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "engineering; kernel performance; large-scale services;
middleware; performance analysis; profiling tools;
storage systems",
}
@Article{Tan:2008:IMV,
author = "Tingxi Tan and Rob Simmonds and Bradley Arlt and
Martin Arlitt and Bruce Walker",
title = "Image management in a virtualized data center",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "4--9",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453177",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Industrial research firms such as Gartner and IDC are
predicting an explosion in the number of online
services in the coming years. Virtualization
technologies could play an important role in such a
world, as they create an opportunity to provide
services in a cost-effective manner. However, to
achieve ideal savings, more dynamic environments must
be created, with Virtual Machines (VMs) being
provisioned and altered on-the-fly. Management issues
arise when using these elastic resources at scale. In
this study, we provide an initial investigation of
performance and scalability issues for image management
in a virtualized data center. Results provided show
that the choice of storage solution and access protocol
matters. For example, our tests show the time to start
a VM from a local hard drive under an I/O-intensive
workload increases by a factor of 15, and for certain
shared storage options this factor increases to 30.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "image management; performance; scalability;
virtualization",
}
@Article{Chandra:2008:CDF,
author = "Abhishek Chandra and Rohini Prinja and Sourabh Jain
and ZhiLi Zhang",
title = "Co-designing the failure analysis and monitoring of
large-scale systems",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "10--15",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453178",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Large-scale distributed systems provide the backbone
for numerous distributed applications and online
services. These systems span over a multitude of
computing nodes located at different geographical
locations connected together via wide-area networks and
overlays. A major concern with such systems is their
susceptibility to failures leading to downtime of
services and hence high monetary/business costs. In
this paper, we argue that to understand failures in
such a system, we need to co-design the monitoring system
with the failure analysis system. Unlike existing
monitoring systems which are not designed specifically
for failure analysis, we advocate a new way to design a
monitoring system with the goal of uncovering causes of
failures. Similarly the failure analysis techniques
themselves need to go beyond simple statistical
analysis of failure events in isolation to serve as an
effective tool. Towards this end, we provide a
discussion of some guiding principles for the co-design
of monitoring and failure analysis systems for
planetary scale systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharma:2008:ARC,
author = "Abhishek B. Sharma and Ranjita Bhagwan and Monojit
Choudhury and Leana Golubchik and Ramesh Govindan and
Geoffrey M. Voelker",
title = "Automatic request categorization in {Internet}
services",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "16--25",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453179",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modeling system performance and workload
characteristics has become essential for efficiently
provisioning Internet services and for accurately
predicting future resource requirements on anticipated
workloads. The accuracy of these models benefits
substantially by differentiating among categories of
requests based on their resource usage characteristics.
However, categorizing requests and their resource
demands often requires significantly more monitoring
infrastructure. In this paper, we describe a method to
automatically differentiate and categorize requests
without requiring sophisticated monitoring techniques.
Using machine learning, our method requires only
aggregate measures such as total number of requests and
the total CPU and network demands, and does not assume
prior knowledge of request categories or their
individual resource demands. We explore the feasibility
of our method on the .Net PetShop 4.0 benchmark
application, and show that it works well while being
lightweight, generic, and easily deployable.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kansal:2008:FGE,
author = "Aman Kansal and Feng Zhao",
title = "Fine-grained energy profiling for power-aware
application design",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "26--31",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453180",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Significant opportunities for power optimization exist
at the application design stage and are not yet fully
exploited by system and application designers. We
describe the challenges developers face in optimizing
software for energy efficiency by exploiting
application-level knowledge. To address these
challenges, we propose the development of automated
tools that profile the energy usage of various resource
components used by an application and guide the design
choices accordingly. We use a preliminary version of a
tool we have developed to demonstrate how automated
energy profiling helps a developer choose between
alternative designs in the energy-performance trade-off
space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fonseca:2008:LRM,
author = "Nahur Fonseca and Mark Crovella and Kav{\'e}
Salamatian",
title = "Long range mutual information",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "32--37",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453181",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network traffic modeling generally views traffic as a
superposition of flows that creates a timeseries of
volume counts (e.g. of bytes or packets). What is
omitted from this view of traffic is the contents of
packets. Packet contents (e.g. header fields) contain
considerable information that can be useful in many
applications such as change and anomaly detection, and
router performance evaluation. The goal of this paper
is to draw attention to the problem of modeling traffic
with respect to the contents of packets. In this
regard, we identify a new phenomenon: long range mutual
information (LRMI), which means that the dependence of
the contents of a pair of packets decays as a power of
the lag between them. We demonstrate that although LRMI
is hard to measure, and hard to model using the
mathematical tools at hand, its effects are easy to
identify in real traffic, and it may have a
considerable impact on a number of applications. We
believe that work in modeling this phenomenon will open
doors to new kinds of traffic models, and new advances
in a number of applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2008:HPM,
author = "Giuliano Casale and Ningfang Mi and Ludmila Cherkasova
and Evgenia Smirni",
title = "How to parameterize models with bursty workloads",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "38--44",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453182",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although recent advances in theory indicate that
burstiness in the service time process can be handled
effectively by queueing models (e.g.,MAP queueing
networks [2]), there is a lack of understanding and of
practical results on how to perform model
parameterization, especially when this parameterization
must be derived from limited coarse
measurements.\par
We propose a new parameterization methodology based on
the index of dispersion of the service process at a
server, which is inferred by observing the number of
completions within the concatenated busy periods of
that server. The index of dispersion together with
other measurements that reflect the 'estimated' mean
and the 95th percentile of service times are used to
derive a MAP process that captures well burstiness of
the true service process.\par
Detailed experimentation on a TPC-W testbed where all
measurements are obtained via a commercially available
tool, the HP (Mercury) Diagnostics, shows that the
proposed technique offers a simple yet powerful
solution to the difficult problem of inferring accurate
descriptors of the service time process from coarse
measurements. Experimental and model prediction results
are in excellent agreement and argue strongly for the
effectiveness of the proposed methodology under bursty
or simply variable workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2008:DPF,
author = "Bill Lin and Jun (Jim) Xu",
title = "{DRAM} is plenty fast for wirespeed statistics
counting",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "45--51",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453183",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Per-flow network measurement at Internet backbone
links requires the efficient maintenance of large
arrays of statistics counters at very high speeds (e.g.
40 Gb/s). The prevailing view is that SRAM is too
expensive for implementing large counter arrays, but
DRAM is too slow for providing wirespeed updates. This
view is the main premise of a number of hybrid
SRAM/DRAM architectural proposals [2, 3, 4, 5] that
still require substantial amounts of SRAM for large
arrays. In this paper, we present a contrarian view
that modern commodity DRAM architectures, driven by
aggressive performance roadmaps for consumer
applications (e.g. video games), have advanced
architecture features that can be exploited to make
DRAM solutions practical. We describe two such schemes
that can harness the performance of these DRAM
offerings by enabling the interleaving of counter
updates to multiple memory banks. These counter schemes
are the first to support arbitrary increments and
decrements for either integer or floating point number
representations at wirespeed. We believe our
preliminary success with the use of DRAM schemes for
wirespeed statistics counting opens the possibilities
for broader research opportunities to generalize the
proposed ideas for other network measurement
functions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; network management; network
measurement; statistics counter",
}
@Article{Agrawal:2008:TRF,
author = "Nitin Agrawal and Andrea C. Arpaci-Dusseau and Remzi
H. Arpaci-Dusseau",
title = "Towards realistic file-system benchmarks with
{CodeMRI}",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "52--57",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453184",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Benchmarks are crucial to understanding software
systems and assessing their performance. In file-system
research, synthetic benchmarks are accepted and widely
used as substitutes for more realistic and complex
workloads. However, synthetic benchmarks are largely
based on the benchmark writer's interpretation of the
real workload, and how it exercises the system API.
This is insufficient since even a simple operation
through the API may end up exercising the file system
in very different ways due to effects of features such
as caching and prefetching. In this paper, we describe
our first steps in creating 'realistic synthetic'
benchmarks by building a tool, CodeMRI. CodeMRI
leverages file-system domain knowledge and a small
amount of system profiling in order to better
understand how the benchmark is stressing the system
and to deconstruct its workload.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weingartner:2008:SNE,
author = "Elias Weing{\"a}rtner and Florian Schmidt and Tobias
Heer and Klaus Wehrle",
title = "Synchronized network emulation: matching prototypes
with complex simulations",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "58--63",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453185",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network emulation, in which real systems interact with
a network simulation, is a common evaluation method in
computer networking research. Until now, the simulation
in charge of representing the network has been required
to be real-time capable, as otherwise a time drift
between the simulation and the real network devices may
occur and corrupt the results. In this paper, we
present our work on synchronized network emulation. By
adding a central synchronization entity and by
virtualizing real systems for means of control, we can
build up network emulations which contain both
unmodified x86 systems and network simulations of any
complexity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnamurthy:2008:WOS,
author = "Balachander Krishnamurthy and Walter Willinger",
title = "What are our standards for validation of
measurement-based networking research?",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "64--69",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453186",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Standards? What standards?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Curry:2008:RAE,
author = "Roger Curry and Cameron Kiddle and Nayden Markatchev
and Rob Simmonds and Tingxi Tan and Martin Arlitt and
Bruce Walker",
title = "Running applications efficiently in online social
networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "71--74",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453188",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the past several years, online social networks
(OSNs) such as Facebook and MySpace have become
extremely popular with Internet users. Such sites are
popular with users because they simplify both
communication among 'communities' and access to
applications. Application developers are attracted to
these sites also, as they are able to exploit
'word-of-mouth' marketing, which these OSN sites have
embodied into their user experience. A challenge for
developers though is managing the application, as it is
difficult to predict how successful the marketing will
be. Our solution combines an OSN, Virtual Appliances,
and a utility computing environment together. We
demonstrate our solution using the Facebook portal
(OSN), the Fire Dynamics Simulator (application), and a
utility environment we built using tools such as
Condor, Moab and Xen. The application is supported
using Virtual Appliances, which interact with our
flexible infrastructure to dynamically expand and
contract based on user demand. Thus, we are able to
make much more efficient use of the underlying physical
infrastructure. We believe that our solution also has
great potential for enterprise IT environments. Initial
feedback suggests combining an OSN with our flexible
infrastructure provides a much better user experience
than the traditional, standalone use of the (legacy)
application, and simplifies the management and
increases the effective utilization of the underlying
IT resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "infrastructure; social networking; virtualization",
}
@Article{Zhang:2008:KTB,
author = "Eddy Zheng Zhang and Giuliano Casale and Evgenia
Smirni",
title = "{KPC-Toolbox}: best recipes toward automatization of
workload fitting",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "75--78",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453189",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the KPC-Toolbox, a set of MATLAB scripts
for fitting workload traces into Markovian Arrival
Processes (MAPs) in an automatic way. Given that the
MAP parameterization space can be very large, we focus
on first determining the order of the smallest MAP that
can fit the trace well using the Bayesian Information
Criterion ({\em BIC\/}). Having determined the order of
the target MAP, the KPC-Toolbox automatically derives a
MAP that captures accurately the moments and temporal
dependence of the trace. We present experiments showing
the effectiveness of the KPC-Toolbox in fitting traces
that are well-documented in the literature as very
challenging ones to fit.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DeVera:2008:AQE,
author = "Daniel {De Vera} and Pablo Rodr{\'\i}guez-Bocca and
Gerardo Rubino",
title = "Automatic quality of experience measuring on video
delivering networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "79--82",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453190",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This article describes a full video delivery network
monitoring suite. Our monitoring tool offers a new view
of a video delivery network, based on the quality as
perceived by final users (what is nowadays called
Quality of Experience, in short QoE). We measure the
perceived quality at the client side by means of the
recently proposed PSQA technology, by studying the
video flows at the frame level. The developed
monitoring suite is a completely free-software
application, based on well-known technologies such as
Simple Network Management Protocol or Round Robin
Databases, which can be executed in various operating
systems. In this short article we explain the tool
implementation and we present some of the measurements
performed with it.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "measuring; monitoring; QoE; VDN; video",
}
@Article{Rossi:2008:PS,
author = "Dario Rossi and Silvio Valenti and Paolo Veglia and
Dario Bonfiglio and Marco Mellia and Michela Meo",
title = "Pictures from the {Skype}",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "83--86",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453191",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper focuses on the characterization and
classification of Skype traffic, a nowadays very
popular and fashionable VoIP application. Building over
previous work, we develop a software tool which can be
used to examine the evolution of Skype call
classification in an interactive fashion. The
demonstrator software focuses on the main aspects of
Skype traffic characterization and presents the traffic
patterns Skype generates during a call or while idle.
In addition, the demonstrator shows the evolution of
the internal indexes the Skype classifiers
use.\par
After describing the classification process and the
demonstrator software, we use the tool to demonstrate
the feasibility of online Skype traffic identification,
considering both accuracy and computational costs.
Experimental results show that a few seconds of
observation are enough to allow the classifier engines
to correctly identify the presence of Skype flows.
Moreover, results indicate that the classification
engine can cope with multi-Gbps links in real-time
using common off-the-shelf hardware.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "demonstrator; experimentation; measurement",
}
@Article{Ormont:2008:CMW,
author = "Justin Ormont and Jordan Walker and Suman Banerjee",
title = "Continuous monitoring of wide-area wireless networks:
data collection and visualization",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "87--89",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453192",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work, we present an infrastructure for
monitoring and visualizing performance of a wide-area
wireless network. We present a unique, vehicle-mounted
platform and a testbed for wide-area wireless
experimentation. The testbed nodes are mounted on metro
transit city buses in Madison, WI, and are currently
equipped with both cellular EV-DO and WiFi interfaces.
Our initial goal for this infrastructure is to
continuously monitor characteristics and performance of
large-scale wireless networks, e.g., city-wide mesh
networks or cellular networks. In such networks,
customers experience a very large range of geographic
and mobility-related radio environments. A
vehicle-mounted platform, with fairly deterministic
mobility patterns, can provide an efficient, low-cost,
and robust method to gather much needed performance
data on parameters like RF coverage, available
bandwidth, and impact of mobility. Our demonstration
outlines the framework of such a distributed
measurement system. We also showcase the potential
benefits by presenting our initial measurements from
this testbed through the use of an intuitive
visualization interface.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anouar:2008:OOW,
author = "Hicham Anouar and Christian Bonnet and Daniel
C{\^a}mara and Fethi Filali and Raymond Knopp",
title = "An overview of {OpenAirInterface} wireless network
emulation methodology",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "90--94",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453193",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The OpenAirInterface wireless network emulator, a tool
with the dual objective of performing protocol and
application performance evaluation, in addition to
real-time layer 2/3 protocol implementation validation,
is described. The current example protocol
implementations closely resemble those of evolving
UMTS-LTE and 802.16e/m networks with the additional
possibility for creating mesh network topologies. They
do not provide any form of compliance, however, with
these standards. The emulation environment comes in
both real-time and non-real-time flavors based on
RTAI/Linux open-source developments. Novel ideas for
physical layer (PHY) abstraction are also reviewed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2008:NPN,
author = "Xiaoyue Jiang",
title = "New perspectives on network calculus",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "95--97",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453195",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Originated in communications engineering and
theoretically rooted in idempotent analysis, the theory
of network calculus (NetCal) presents an elegant
methodology for offering performance guarantees in
deterministic queuing systems. In this research we
developed two new formulations of NetCal, each of
which bears some unique insights. A fuzzy formulation
maps NetCal's (min,+) convolution operator to the
addition of fuzzy numbers. A conjugate perspective
based on the notion of Legendre transform leads to a
new NetCal formulation, termed CT-NetCal, which
possesses some distinct advantages in modeling,
computation and interpretation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fuzzy number addition; Legendre transform; network
calculus; product norm",
}
@Article{Garikiparthi:2008:BPA,
author = "Chaitanya Garikiparthi and Appie van de Liefvoort and
Ken Mitchell",
title = "Busy period analysis of finite {QBD} processes",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "98--100",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453196",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the number of customers served and the
length of a busy period for finite quasi birth and
death (QBD) processes where either one or both of the
arrival or service processes can be serially correlated
or interdependent. Special cases include the G/G/1/K,
M/G/1/K, and G/M/1/K queues. The resulting algorithms
are linear algebraic in nature and are easily
implemented. The solutions allow studies on how the
moments and correlations in the arrival and service
processes affect the busy period. This includes the
probability of serving exactly {\em n\/} customers
during a busy period and the moments of the length of
the busy period for different system (queue) sizes. We
present an example of a QBD process where arrival and
service processes are strongly dependent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jelenkovic:2008:FRS,
author = "Predrag R. Jelenkovi{\'c} and Xiaozhu Kang",
title = "Is fair resource sharing responsible for spreading
long delays?",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "101--103",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453197",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We show that mixing the statistically long jobs
(subexponential) and short ones (exponentially bounded)
using processor sharing service discipline causes long
(subexponential) delays for all types of jobs in the
system. Since processor sharing represents a baseline
fair scheduling discipline used in designing Web
servers, as well as the basic model of TCP bandwidth
sharing, our finding suggests that even though fairness
possesses many desirable attributes, it causes
unnecessarily long delays for statistically short jobs.
Hence, fairness comes with a price.\par
We further quantify the preceding result when the long
jobs follow the widely observed power law distribution
$ x^{- \alpha } $, $ \alpha > 0 $, where we discover
the criticality of the {\em lognormal\/} distribution
for the delay characteristics of the lighter jobs.
Specifically, we find that when the shorter jobs are
heavier than {\em lognormal}, the sojourn time $V$ and
the service time distribution $B$ of the shorter jobs
are tail equivalent $ P[V > x] \sim P[B > (1 - \rho)
x]$. However, if $ P[B > x]$ is lighter than {\em
lognormal}, the preceding tail equivalence does not
hold.\par
Furthermore, when the shorter jobs $B$ have much
lighter tails $ e^{- \lambda x^{\beta}}$, $ \lambda >
0$, $ \beta > 0$, we show that the distribution of the
delay $V$ for these jobs satisfies, as $ x \rightarrow
\infty $, $ - \log P[V > x] \sim c (x \log x)^{\beta /
(\beta + 1)}$, where $c$ is explicitly computable. Note
that $ \beta = 1$ and $ \beta = 2$ represent the
exponential and Gaussian cases with the corresponding
delay distributions approximately of the form $ e^{-
\sqrt {x \log x}}$ and $ e^{-(x \log x)^{2 / 3}}$,
respectively. Our results are different from the
existing ones in the literature that focused on the
delays which are of the same form (tail equivalent) as
the jobs size distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic analysis; fairness; heavy tails; induced
long delays; light tails; processor sharing queue;
scheduling",
}
@Article{Gupta:2008:FOQ,
author = "Varun Gupta",
title = "Finding the optimal quantum size: {Sensitivity}
analysis of the {\em {M\slash G\slash 1\/}} round-robin
queue",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "104--106",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453198",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the round robin (RR) scheduling policy
where the server processes each job in its buffer for
at most a fixed quantum, q, in a round-robin fashion.
The processor sharing (PS) policy is an idealization of
the quantum-based round-robin scheduling in the limit
where the quantum size becomes infinitesimal, and has
been the subject of many papers. It is well known that
the mean response time in an M/G/1/PS queue depends on
the job size distribution via only its mean. However,
almost no explicit results are available for the
round-robin policy. For example, how does the
variability of job sizes affect the mean response time
in an M/G/1/RR queue? How does one choose the optimal
quantum size in the presence of switching overheads? In
this paper we present some preliminary answers to these
fundamental questions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2008:ASI,
author = "Eitan Bachmat and Hagit Sarfati",
title = "Analysis of size interval task assignment policies",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "107--109",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453199",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the performance of Size Interval task
assignment (SITA) scheduling policies, for multi-host
scheduling in a non-preemptive environment. We
establish a general duality theory for the performance
analysis of SITA policies. When the job size
distribution is Bounded Pareto and the range of job
sizes tends to infinity, we determine asymptotically
optimal cutoff values and provide asymptotic formulas
for average waiting time and slowdown. In the case of
inhomogeneous hosts we determine their optimal
ordering. We also consider TAGS policies. We provide a
general formula that describes their load handling
capabilities and examine their performance when the job
size distribution is Bounded Pareto.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2008:ELS,
author = "Ho-Lin Chen and Jason R. Marden and Adam Wierman",
title = "The effect of local scheduling in load balancing
designs",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "110--112",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453200",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:2008:JRP,
author = "Yuan Wu and Danny H. K. Tsang",
title = "Joint rate-and-power allocation for multi-channel
spectrum sharing networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "113--115",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453201",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this abstract, we propose a study on joint
rate-and-power allocation problem for multi-channel
spectrum sharing networks (SSNs). We formulate this
cross-layer optimization problem as a non-cooperative
potential game {\em G\/}$_{{\em JRPA \/ }}$ in which
each user has a coupled two-tuple strategy, i.e.,
simultaneous rate and multi-channel power allocations.
A multi-objective cost function is designed to
represent user's awareness of both QoS provisioning and
power saving. Using the game-theoretic formulation, we
investigate the properties of Nash equilibrium (N.E.)
for our {\em G\/}$_{{\em JRPA \/ }}$ model, including
its existence, and properties of QoS provisioning as
well as power saving. Furthermore, a layered structure
is derived by applying Lagrangian dual decomposition to
{\em G\/}$_{{\em JRPA \/ }}$ and a distributed
algorithm is proposed to find the N.E. via this
structure.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2008:SMB,
author = "Pei Li and John C. S. Lui and Yinlong Xu",
title = "A stochastic model for {BitTorrent}-like systems",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "116--118",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453202",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jelenkovic:2008:CMS,
author = "Predrag R. Jelenkovi{\'c} and Xiaozhu Kang",
title = "Characterizing the miss sequence of the {LRU} cache",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "119--121",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453203",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Renewed interest in caching systems stems from their
widespread use for reducing the document download
latency over the Internet. Since caches are usually
organized in a hierarchical manner, it is important to
study the performance properties of tandem caches. The
first step in understanding this problem is to
characterize the miss stream from one single cache
since it represents the input to the next level cache.
In this regard, we discover that the miss stream from
one single cache is approximated well by the
superposition of a number of asymptotically independent
renewal processes. Interestingly, when this weakly
correlated miss sequence is fed into another cache,
this barely observable correlation can lead to
measurably different caching performance when compared
to the independent reference model. This result is
likely to enable the development of a rigorous analysis
of the tandem cache performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "average-case analysis; cache fault probability;
hierarchical caching; least-recently-used caching; web
caching; Zipf's law",
}
@Article{Simatos:2008:SSM,
author = "Florian Simatos and Danielle Tibi",
title = "Study of a stochastic model for mobile networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "122--124",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453204",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Momcilovic:2008:TSL,
author = "Petar Mom{\v{c}}ilovi{\'c} and Mark S. Squillante",
title = "On throughput in stochastic linear loss networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "125--127",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453205",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2008:FLR,
author = "Varun Gupta and Peter G. Harrison",
title = "Fluid level in a reservoir with an on-off source",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "128--130",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453206",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We obtain the Laplace transform of the fluid level
probability density function, in terms of the on-period
density function, for a fluid queue (or reservoir) with
on-off input at equilibrium. We further obtain explicit
expressions for the moments of fluid level in terms of
the moments of the on-period and hence derive an
algorithm for the moments of fluid level at every queue
in a tandem network. It turns out that to calculate the
$k$th moment at the $i$th queue, only the first $ k +
1$ moments of the on-period of the input process to the
first queue are required.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kwak:2008:SAS,
author = "K. J. Kwak and Y. M. Baryshnikov and E. G. Coffman",
title = "Self-assembling sweep-and-sleep sensor systems",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "131--133",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453207",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a self-assembling sleep-wake
sensor system that is scalable, easily implemented, and
energy conserving. Sensors actively detecting events
form wave fronts that sweep the sensor field. An
application of concepts from cellular automata theory
accounts for much of its novelty. The system has
additional, highly desirable properties such as a
self-healing capability, fault tolerance, asynchronous
operation, seamless accommodation of obstacles in the
sensor field, and high effectiveness even against
intelligent intruders, i.e., those who know the
sensor design and sensor locations. System performance
is a focus of the paper, and, as in the study of the
emergent behavior of cellular automata, an instructive
example of experimental mathematics. Related open
questions in mathematical performance analysis are
reviewed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2008:CCO,
author = "Giuliano Casale",
title = "{CoMoM}: class-oriented evaluation of multiclass
models",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "134--136",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453208",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dieker:2008:COF,
author = "A. B. Dieker and S. Ghosh and M. S. Squillante",
title = "Capacity optimization in feedforward {Brownian}
networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "137--139",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453209",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haverkort:2008:QAG,
author = "Boudewijn R. Haverkort and Markus Siegle and Maarten
van Steen",
title = "Quantitative analysis of gossiping protocols",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "2--2",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481508",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Katoen:2008:HMA,
author = "Joost-Pieter Katoen",
title = "How to model and analyze gossiping protocols?",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "3--6",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481509",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crouzen:2008:AFM,
author = "Pepijn Crouzen and Jaco van de Pol and Arend Rensink",
title = "Applying formal methods to gossiping networks with
{mCRL} and {Groove}",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "7--16",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481510",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we explore the practical possibilities
of using formal methods to analyze gossiping networks.
In particular, we use &\#956;CRL and Groove to model
the peer sampling service, and analyze it through a
series of model transformations to CTMCs and finally
MRMs. Our tools compute the expected value of various
network quality indicators, such as average path
lengths, over all possible system runs. Both transient
and steady state analysis are supported. We compare our
results with the simulation and emulation results found
in [10].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kwiatkowska:2008:AGP,
author = "Marta Kwiatkowska and Gethin Norman and David Parker",
title = "Analysis of a gossip protocol in {PRISM}",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "17--22",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481511",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gossip protocols have been proposed as a robust and
efficient method for disseminating information
throughout dynamically changing networks. We present an
analysis of a gossip protocol using probabilistic model
checking and the tool PRISM. Since the behaviour of
these protocols is both probabilistic and
nondeterministic in nature, this provides a good
example of the exhaustive, quantitative analysis that
probabilistic model checking techniques can provide. In
particular, we compute minimum and maximum values,
representing the best- and worst-case performance of
the protocol under any scheduling, and investigate both
their relationship with the average values that would
be obtained through simulation and the precise
schedulings which achieve these values.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krieger:2008:VPM,
author = "Thomas Krieger and Martin Riedl and Johann Schuster
and Markus Siegle",
title = "A view-probability-matrix approach to the modelling of
gossiping protocols",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "23--30",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481512",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses the quantitative analysis of
gossiping protocols. In contrast to existing approaches
which are entirely based on the simulation of the
individual nodes' behaviours, we present a new approach
based on summary stochastic models for the peer
sampling service. Instead of an ordinary state- and
transition-based model, a matrix-based approach is
presented. Starting from a basic model with static node
population and without ageing of neighbourhood
information, refinements of the model are presented
which enable the modelling of ageing and dynamic
population. The paper also contains some experimental
results for the different models introduced in the
paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bakhshi:2008:MAE,
author = "Rena Bakhshi and Lucia Cloth and Wan Fokkink and
Boudewijn R. Haverkort",
title = "{MeanField} analysis for the evaluation of gossip
protocols",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "31--39",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481513",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gossip protocols are designed to operate in very
large, decentralised networks. A node in such a network
bases its decision to interact (gossip) with another
node on its partial view of the global system. Because
of the size of these networks, analysis of gossip
protocols is mostly done using simulations, which tend
to be expensive in computation time and memory
consumption.\par
We introduce mean-field analysis as an analytical
method to evaluate gossip protocols. Nodes in the
network are represented by small identical stochastic
models. Joining all nodes would result in an enormous
stochastic process. If the number of nodes goes to
infinity, however, mean-field analysis allows us to
replace this intractably large stochastic process by a
small deterministic process. This process approximates
the behaviour of very large gossip networks, and can be
evaluated using simple matrix-vector multiplications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Estrada:2008:DEM,
author = "Trilce Estrada and Olac Fuentes and Michela Taufer",
title = "A distributed evolutionary method to design scheduling
policies for volunteer computing",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "40--49",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481515",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Volunteer Computing (VC) is a paradigm that takes
advantage of idle cycles from computing resources
donated by volunteers and connected through the
Internet to compute large-scale, loosely coupled
simulations. A big challenge in VC projects is the
scheduling of work-units across heterogeneous,
volatile, and error-prone computers. The design of
efficient scheduling policies for VC projects involves
subjective and time-demanding tuning that is driven by
knowledge of the project designer. VC projects are in
need of a faster and project-independent method to
automate the scheduling design.\par
To automatically generate a scheduling policy, we must
explore the extremely large space of syntactically
valid policies. Given the size of this search space,
exhaustive search is not feasible. Thus in this paper
we propose to solve the problem using an evolutionary
method to automatically generate a set of scheduling
policies that are project-independent, minimize errors,
and maximize throughput in VC projects. Our method
includes a genetic algorithm where the representation
of individuals, the fitness function, and the genetic
operators are specifically tailored to get effective
policies in a short time. The effectiveness of our
method is evaluated with SimBA, a Simulator of BOINC
Applications. In contrast with manually designed
scheduling policies that often perform well only for
the specific project they were designed for and require
months of tuning, our resulting scheduling policies
provide better overall throughput across the different
VC projects considered in this work and were generated
by our method in a time window of one week.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed systems; genetic algorithms; global
computing; volatile systems",
}
@Article{Eddy:2008:BPI,
author = "Wesley M. Eddy",
title = "Basic properties of the {IPv6} {AS}-level topology",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "50--57",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481516",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several well-known measurement studies have revealed
aspects of the Internet's AS-level and router-level
topologies, and derived a few important properties.
This has yielded graph models and parameter ranges that
allow for greater confidence in simulation of new
protocols as well as a deeper understanding of the
Internet's structure and similarity to other types of
technological, biological, economic, and social
networks. The majority of Internet topology studies
have been focused on the IPv4 portion of the Internet,
and at this time relatively few observations of the
Internet's IPv6 topology have been published. In this
report, we use over three years of data gathered in the
Route Views archives to describe some basic properties
of the IPv6 AS-level topology. We find similarities
with the IPv4 AS graph in several regards, including
the small-world nature of the graph. We also find some
interesting differences, including the values of the
graph's diameter and the criticality of a few
well-connected nodes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2009:SIT,
author = "Giuliano Casale and Richard R. Muntz and Giuseppe
Serazzi",
title = "Special issue on tools for computer performance
modeling and reliability analysis",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "2--3",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530875",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baarir:2009:GTR,
author = "Soheib Baarir and Marco Beccuti and Davide Cerotti and
Massimiliano De Pierro and Susanna Donatelli and
Giuliana Franceschinis",
title = "The {GreatSPN} tool: recent enhancements",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "4--9",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530876",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "GreatSPN is a tool that supports the design and the
qualitative and quantitative analysis of Generalized
Stochastic Petri Nets (GSPN) and of Stochastic
Well-Formed Nets (SWN). The very first version of
GreatSPN saw the light in the late eighties of last
century: since then two main releases were developed
and widely distributed to the research community:
GreatSPN1.7 [13], and GreatSPN2.0 [8]. This paper
reviews the main functionalities of GreatSPN2.0 and
presents some recently added features that
significantly enhance the efficacy of the tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bertoli:2009:JPE,
author = "Marco Bertoli and Giuliano Casale and Giuseppe
Serazzi",
title = "{JMT}: performance engineering tools for system
modeling",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "10--15",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530877",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the Java Modelling Tools (JMT) suite, an
integrated framework of Java tools for performance
evaluation of computer systems using queueing models.
The suite offers a rich user interface that simplifies
the definition of performance models by means of wizard
dialogs and of a graphical design workspace.\par
The performance evaluation features of JMT span a wide
range of state-of-the-art methodologies including
discrete-event simulation, mean value analysis of
product-form networks, analytical identification of
bottleneck resources in multiclass environments, and
workload characterization with fuzzy clustering. The
discrete-event simulator supports several advanced
modeling features such as finite capacity regions,
load-dependent service times, bursty processes,
fork-and-join nodes, and it implements spectral estimation
for the analysis of simulation results. The suite is
open-source, released under the GNU general public
license (GPL), and it is available for free download
at: http://jmt.sourceforge.net.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaonkar:2009:PDM,
author = "Shravan Gaonkar and Ken Keefe and Ruth Lamprecht and
Eric Rozier and Peter Kemper and William H. Sanders",
title = "Performance and dependability modeling with
{M{\"o}bius}",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "16--21",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530878",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "M{\"o}bius is a multi-paradigm multi-solution
framework to describe and analyze stochastic models of
discrete-event dynamic systems. M{\"o}bius is widely
used in academia and industry for the performance and
dependability assessment of technical systems. It comes
with design-of-experiments support as well as automated
support for distributing a series of simulation
experiments over a network to support the exploration
of design spaces for real-world applications. In
addition to that, the M{\"o}bius simulator interfaces
with Traviando, a separate trace analyzer and
visualizer that helps to investigate the details of a
complex model for validation, verification, and
debugging purposes. In this paper, we outline the
development of a multi-formalism model of a Lustre-like
file system, the analysis of its detailed simulated
behavior, and the results obtained from a simulation
study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arns:2009:OTO,
author = "Markus Arns and Peter Buchholz and Dennis M{\"u}ller",
title = "{OPEDo}: a tool for the optimization of performance
and dependability models",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "22--27",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530879",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "OPEDo is a software tool for the optimization of
discrete event systems according to performance or
dependability measures. The tool can be seen as an
add-on to various tools for performance and dependability
analysis. The goal of OPEDo is to provide a wide
variety of optimization algorithms for complex black
box functions as they are required for the model based
optimization of discrete event systems using
analytically tractable models or simulation models. The
paper introduces the software architecture of the tool,
gives a brief sketch of the integrated optimization
algorithms and presents several examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tribastone:2009:PEP,
author = "Mirco Tribastone and Adam Duguid and Stephen Gilmore",
title = "The {PEPA Eclipse} plugin",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "28--33",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530880",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The PEPA Eclipse Plug-in supports the creation and
analysis of performance models, from small-scale Markov
models to large-scale simulation studies and
differential equation systems. Whichever form of
analysis is used, models are expressed in a single
high-level language for quantitative modelling,
Performance Evaluation Process Algebra (PEPA).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dingle:2009:PTP,
author = "Nicholas J. Dingle and William J. Knottenbelt and
Tamas Suto",
title = "{PIPE2}: a tool for the performance evaluation of
generalised stochastic {Petri Nets}",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "34--39",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530881",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an overview of
Platform-Independent Petri Net Editor 2 (PIPE2), an
open-source tool that supports the design and analysis
of Generalised Stochastic Petri Net (GSPN) models.
PIPE2's extensible design enables developers to add
functionality via pluggable analysis modules. It also
acts as a front-end for a parallel and distributed
performance evaluation environment. With PIPE2, users
are able to design and evaluate performance queries
expressed in the Performance Tree formalism.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "GSPNs; parallel and distributed computing; performance
trees; PIPE2; stochastic modelling",
}
@Article{Kwiatkowska:2009:PPM,
author = "Marta Kwiatkowska and Gethin Norman and David Parker",
title = "{PRISM}: probabilistic model checking for performance
and reliability analysis",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "40--45",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530882",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Probabilistic model checking is a formal verification
technique for the modelling and analysis of stochastic
systems. It has proved to be useful for studying a wide
range of quantitative properties of models taken from
many different application domains. This includes, for
example, performance and reliability properties of
computer and communication systems. In this paper, we
give an overview of the probabilistic model checking
tool PRISM, focusing in particular on its support for
continuous-time Markov chains and Markov reward models,
and how these can be used to analyse performability
properties.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kounev:2009:QPM,
author = "Samuel Kounev and Christofer Dutz",
title = "{QPME}: a performance modeling tool based on queueing
{Petri Nets}",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "46--51",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530883",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing Petri nets are a powerful formalism that can
be exploited for modeling distributed systems and
analyzing their performance and scalability. By
combining the modeling power and expressiveness of
queueing networks and stochastic Petri nets, queueing
Petri nets provide a number of advantages. In this
paper, we present QPME (Queueing Petri net Modeling
Environment) --- a tool that supports the modeling and
analysis of systems using queueing Petri nets. QPME
provides an Eclipse-based editor for designing queueing
Petri net models and a powerful simulation engine for
analyzing the models. After presenting the tool, we
discuss the ongoing work on the QPME project and the
planned future enhancements of the tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Trivedi:2009:SAT,
author = "Kisho S. Trivedi and Robin Sahner",
title = "{SHARPE} at the age of twenty two",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "52--57",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530884",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses the modeling tool called SHARPE
(Symbolic Hierarchical Automated Reliability and
Performance Evaluator), a general hierarchical modeling
tool that analyzes stochastic models of reliability,
availability, performance, and performability. It
allows the user to choose the number of levels of
models, the type of model at each level, and which
results from each model level are to act as which
parameters in which higher-level models. SHARPE
includes algorithms for analysis of fault trees,
reliability block diagrams, acyclic series-parallel
graphs, acyclic and cyclic Markov and semi-Markov
models, generalized stochastic Petri nets, and closed
single- and multi-chain product-form queueing networks.
For many of these, the user can choose among
alternative algorithms, and can decide whether to get a
result in the form of a distribution function (symbolic
in the time variable) or as a mean or probability.
SHARPE has been useful to students, practicing
engineers, and researchers. In this paper we discuss
the history of SHARPE, give some examples of its use,
and talk about some lessons learned.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciardo:2009:AFS,
author = "Gianfranco Ciardo and Andrew S. Miner and Min Wan",
title = "Advanced features in {SMART}: the stochastic model
checking analyzer for reliability and timing",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "58--63",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530885",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe some of the advanced features of the
software tool SmArT, the Stochastic Model checking
Analyzer for Reliability and Timing. Initially
conceived as a software package for numerical solution
and discrete-event simulation of stochastic models,
SmArT now also provides powerful model-checking
capabilities, thanks to its extensive use of various
forms of decision diagrams, which in turn also greatly
increase the efficiency of its stochastic analysis
algorithms. These aspects make it an excellent choice
when tackling systems with extremely large state
spaces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouzaeSilva:2009:TIM,
author = "Edmundo {de Souza e Silva} and Daniel R. Figueiredo
and Rosa M. M. Le{\~a}o",
title = "The {TANGRAMII} integrated modeling environment for
computer systems and networks",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "64--69",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530886",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The TANGRAM-II tool has been developed aiming at
supporting the performance analyst throughout the
entire modeling process, from model construction and
model solution to experimentation. The tool has a
powerful user interface that can be tailored to
specific problem domain, it includes a rich set of
analytic solution techniques, distinct options for
obtaining the measures of interest, a hybrid fluid and
event driven simulator, visualization features to
follow the model's evolution with time, traffic
generators and active measurement techniques to assist
the user in performing computer networking
experimentation. These and additional characteristics
make TANGRAM-II a unique tool for research and
education.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lelarge:2009:ECE,
author = "Marc Lelarge",
title = "Efficient control of epidemics over random networks",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "1--12",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555351",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555351",
abstract = "Motivated by the modeling of the spread of viruses or
epidemics with coordination among agents, we introduce
a new model generalizing both the basic contact model
and the bootstrap percolation. We analyze this
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pathak:2009:BSC,
author = "Abhinav Pathak and Feng Qian and Y. Charlie Hu and Z.
Morley Mao and Supranamaya Ranjan",
title = "Botnet spam campaigns can be long lasting: evidence,
implications, and analysis",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "13--24",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555352",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555352",
abstract = "Accurately identifying spam campaigns launched by a
large number of bots in a botnet allows for accurate
spam campaign signature generation and hence is
critical to defeating spamming botnets. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Torres:2009:IUB,
author = "Ruben D. Torres and Mohammad Y. Hajjat and Sanjay G.
Rao and Marco Mellia and Maurizio M. Munafo",
title = "Inferring undesirable behavior from {P2P} traffic
analysis",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "25--36",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555353",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555353",
abstract = "While peer-to-peer (P2P) systems have emerged in
popularity in recent years, their large-scale and
complexity make them difficult to reason about. In this
paper, we argue that systematic analysis of traffic
characteristics \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anand:2009:RNT,
author = "Ashok Anand and Chitra Muthukrishnan and Aditya Akella
and Ramachandran Ramjee",
title = "Redundancy in network traffic: findings and
implications",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "37--48",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555355",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555355",
abstract = "A large amount of popular content is transferred
repeatedly across network links in the Internet. In
recent years, protocol-independent redundancy
elimination, which can remove duplicate strings from
within arbitrary \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jin:2009:UCN,
author = "Yu Jin and Esam Sharafuddin and Zhi-Li Zhang",
title = "Unveiling core network-wide communication patterns
through application traffic activity graph
decomposition",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "49--60",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555356",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555356",
abstract = "As Internet communications and applications become
more complex, operating, managing and securing networks
have become increasingly challenging tasks. There are
urgent demands for more sophisticated techniques for
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramasubramanian:2009:TIL,
author = "Venugopalan Ramasubramanian and Dahlia Malkhi and
Fabian Kuhn and Mahesh Balakrishnan and Archit Gupta
and Aditya Akella",
title = "On the treeness of {Internet} latency and bandwidth",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "61--72",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555357",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555357",
abstract = "Existing empirical studies of Internet structure and
path properties indicate that the Internet is
tree-like. This work quantifies the degree to which at
least two important Internet measures--latency
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meiners:2009:TTA,
author = "Chad R. Meiners and Alex X. Liu and Eric Torng",
title = "Topological transformation approaches to optimizing
{TCAM}-based packet classification systems",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "73--84",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555359",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555359",
abstract = "Several range reencoding schemes have been proposed to
mitigate the effect of range expansion and the
limitations of small capacity, large power consumption,
and high heat generation of TCAM-based packet
classification \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shen:2009:RDP,
author = "Kai Shen and Christopher Stewart and Chuanpeng Li and
Xin Li",
title = "Reference-driven performance anomaly identification",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "85--96",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555360",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555360",
abstract = "Complex system software allows a variety of execution
conditions on system configurations and workload
properties. This paper explores a principled use of
reference executions--those of similar execution
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2009:NWS,
author = "Gagan R. Gupta and Sujay Sanghavi and Ness B. Shroff",
title = "Node weighted scheduling",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "97--108",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555361",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555361",
abstract = "This paper proposes a new class of online policies for
scheduling in input-buffered crossbar switches. Given
an initial configuration of packets at the input
buffers, these policies drain all packets in the system
in the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chaintreau:2009:AGS,
author = "Augustin Chaintreau and Jean-Yves {Le Boudec} and
Nikodin Ristanovic",
title = "The age of gossip: spatial mean field regime",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "109--120",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555363",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555363",
abstract = "Disseminating a piece of information, or updates for a
piece of information, has been shown to benefit greatly
from simple randomized procedures, sometimes referred
to as gossiping, or epidemic algorithms. Similarly, in
a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bodas:2009:SMC,
author = "Shreeshankar Bodas and Sanjay Shakkottai and Lei Ying
and R. Srikant",
title = "Scheduling in multi-channel wireless networks: rate
function optimality in the small-buffer regime",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "121--132",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555364",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555364",
abstract = "We consider the problem of designing scheduling
algorithms for the downlink of cellular wireless
networks where bandwidth is partitioned into tens to
hundreds of parallel channels, each of which can be
allocated \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajagopalan:2009:NAT,
author = "Shreevatsa Rajagopalan and Devavrat Shah and Jinwoo
Shin",
title = "Network adiabatic theorem: an efficient randomized
protocol for contention resolution",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "133--144",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555365",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555365",
abstract = "The popularity of Aloha -like algorithms for
resolution of contention between multiple entities
accessing common resources is due to their extreme
simplicity and distributed nature. Example applications
of such algorithms \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharma:2009:DDC,
author = "Abhishek B. Sharma and Leana Golubchik and Ramesh
Govindan and Michael J. Neely",
title = "Dynamic data compression in multi-hop wireless
networks",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "145--156",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555367",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555367",
abstract = "Data compression can save energy and increase network
capacity in wireless sensor networks. However, the
decision of whether and when to compress data can
depend upon platform hardware, topology, wireless
channel \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gandhi:2009:OPA,
author = "Anshul Gandhi and Mor Harchol-Balter and Rajarshi Das
and Charles Lefurgy",
title = "Optimal power allocation in server farms",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "157--168",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555368",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555368",
abstract = "Server farms today consume more than 1.5\% of the
total electricity in the U.S. at a cost of nearly \$4.5
billion. Given the rising cost of energy, many
industries are now seeking solutions for how to best
make use of their \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coskun:2009:EIJ,
author = "Ayse K. Coskun and Richard Strong and Dean M. Tullsen
and Tajana Simunic Rosing",
title = "Evaluating the impact of job scheduling and power
management on processor lifetime for chip
multiprocessors",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "169--180",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555369",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555369",
abstract = "Temperature-induced reliability issues are among the
major challenges for multicore architectures. Thermal
hot spots and thermal cycles combine to degrade
reliability. This research presents new
reliability-aware job \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2009:UIC,
author = "Feng Chen and David A. Koufaty and Xiaodong Zhang",
title = "Understanding intrinsic characteristics and system
implications of flash memory based solid state drives",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "181--192",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555371",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555371",
abstract = "Flash Memory based Solid State Drive (SSD) has been
called a ``pivotal technology'' that could
revolutionize data storage systems. Since SSD shares a
common interface with the traditional hard disk drive
(HDD), both physically \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schroeder:2009:DEW,
author = "Bianca Schroeder and Eduardo Pinheiro and
Wolf-Dietrich Weber",
title = "{DRAM} errors in the wild: a large-scale field study",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "193--204",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555372",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555372",
abstract = "Errors in dynamic random access memory (DRAM) are a
common form of hardware failure in modern compute
clusters. Failures are costly both in terms of hardware
replacement costs and service disruption. While a
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mi:2009:RUI,
author = "Ningfang Mi and Alma Riska and Xin Li and Evgenia
Smirni and Erik Riedel",
title = "Restrained utilization of idleness for transparent
scheduling of background tasks",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "205--216",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555373",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555373",
abstract = "A common practice in system design is to treat
features intended to enhance performance and
reliability as low priority tasks by scheduling them
during idle periods, with the goal to keep these
features transparent to the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2009:NSB,
author = "Yi Wang and Michael Schapira and Jennifer Rexford",
title = "Neighbor-specific {BGP}: more flexible routing
policies while improving global stability",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "217--228",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555375",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555375",
abstract = "The Border Gateway Protocol (BGP) offers network
administrators considerable flexibility in controlling
how traffic flows through their networks. However, the
interaction between routing policies in different
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Laoutaris:2009:DTB,
author = "Nikolaos Laoutaris and Georgios Smaragdakis and Pablo
Rodriguez and Ravi Sundaram",
title = "Delay tolerant bulk data transfers on the {Internet}",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "229--238",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555376",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555376",
abstract = "Many emerging scientific and industrial applications
require transferring multiple Tbytes of data on a daily
basis. Examples include pushing scientific data from
particle accelerators/colliders to laboratories around
the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2009:CCD,
author = "Wenjie Jiang and Rui Zhang-Shen and Jennifer Rexford
and Mung Chiang",
title = "Cooperative content distribution and traffic
engineering in an {ISP} network",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "239--250",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555377",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555377",
abstract = "Traditionally, Internet Service Providers (ISPs) make
profit by providing Internet connectivity, while
content providers (CPs) play the more lucrative role of
delivering content to users. As network connectivity is
increasingly a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cohen:2009:LDS,
author = "Edith Cohen and Haim Kaplan",
title = "Leveraging discarded samples for tighter estimation of
multiple-set aggregates",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "251--262",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555379",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555379",
abstract = "Many datasets, including market basket data, text or
hypertext documents, and events recorded in different
locations or time periods, can be modeled as a
collection of sets over a ground set of keys. Common
queries \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Loiseau:2009:MLE,
author = "Patrick Loiseau and Paulo Gon{\c{c}}alves and
St{\'e}phane Girard and Florence Forbes and Pascale
Vicat-Blanc Primet",
title = "Maximum likelihood estimation of the flow size
distribution tail index from sampled packet data",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "263--274",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555380",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555380",
abstract = "In the context of network traffic analysis, we address
the problem of estimating the tail index of flow (or
more generally of any group) size distribution from the
observation of a sampled population of packets
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qiu:2009:MCP,
author = "Tongqing Qiu and Zihui Ge and Seungjoon Lee and Jia
Wang and Qi Zhao and Jun Xu",
title = "Modeling channel popularity dynamics in a large {IPTV}
system",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "275--286",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555381",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555381",
abstract = "Understanding the channel popularity or content
popularity is an important step in the workload
characterization for modern information distribution
systems (e.g., World Wide Web, peer-to-peer
file-sharing systems, \ldots{}).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2009:SRT,
author = "Mor Harchol-Balter and Alan Scheller-Wolf and Andrew
R. Young",
title = "Surprising results on task assignment in server farms
with high-variability workloads",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "287--298",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555383",
abstract = "This paper investigates the performance of task
assignment policies for server farms, as the
variability of job sizes (service demands) approaches
infinity. Our results reveal that some common wisdoms
regarding task \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sandholm:2009:MOU,
author = "Thomas Sandholm and Kevin Lai",
title = "{MapReduce} optimization using regulated dynamic
prioritization",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "299--310",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555384",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555384",
abstract = "We present a system for allocating resources in shared
data and compute clusters that improves MapReduce job
scheduling in three ways. First, the system uses
regulated and user-assigned priorities to offer
different \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2009:SAA,
author = "Varun Gupta and Mor Harchol-Balter",
title = "Self-adaptive admission control policies for
resource-sharing systems",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "311--322",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555385",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555385",
abstract = "We consider the problem of admission control in
resource sharing systems, such as web servers and
transaction processing systems, when the job size
distribution has high variability, with the aim of
minimizing the mean \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scheuermann:2009:WSS,
author = "Bj{\"o}rn Scheuermann and Wolfgang Kiess",
title = "Who said that?: the send-receive correlation problem
in network log analysis",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "3--5",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639564",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When analyzing packet log files from network
experiments, the question arises of which received packet
belongs to which send event. If non-unique (i.e., binary
identical) transmissions have occurred, this
send-receive correlation problem can become very
challenging. We discuss this problem in the case of
networks with local broadcast media, and outline first
directions for how it can be solved.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anandkumar:2009:SRM,
author = "Animashree Anandkumar and Chatschik Bisdikian and Ting
He and Dakshi Agrawal",
title = "Selectively retrofitting monitoring in distributed
systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "6--8",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current distributed systems carry legacy subsystems
lacking sufficient instrumentation for monitoring the
end-to-end business transactions supported by these
systems. In the absence of instrumentation, only
probabilistic monitoring is possible by using
time-stamped log-records. Retrofitting these systems
with expensive monitoring instrumentation provides
high-granularity, precise tracking of transactions.
Given a limited budget, local instrumentation
strategies which maximize the effectiveness of
monitoring transactions throughout the system are
proposed. The operation of the end-to-end system is
modeled by a queuing network; each queue represents a
subsystem which produces time-stamped log-records as
transactions pass through it. Two simple heuristics for
instrumentation are proposed which become optimal under
certain conditions. One heuristic selects states in the
transition diagram for local instrumentation in the
decreasing order of the load factors of their queues.
Sufficient conditions for this load-factor heuristic to
be optimal are proven using the notion of stochastic
order. The other heuristic selects states in the
transition diagram based on the approximated tracking
accuracy of probabilistic monitoring at each state,
which is shown to be tight at low arrival rates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bipartite matching; probabilistic transaction
monitoring; queuing networks; stochastic comparison",
}
@Article{Dubey:2009:PMD,
author = "Abhishek Dubey and Rajat Mehrotra and Sherif
Abdelwahed and Asser Tantawi",
title = "Performance modeling of distributed multi-tier
enterprise systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "9--11",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639566",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2009:DCR,
author = "Chao Wang and Xiaoli Ma",
title = "Deriving {Cram{\'e}r--Rao} bounds and maximum
likelihood estimators for traffic matrix inference",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "12--14",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic matrix estimation has caught numerous
attentions these days due to its importance on network
management tasks such as traffic engineering and
capacity planning for Internet Service Providers (ISP).
Various estimation models and methods have been
proposed to estimate the traffic matrix. However, it is
difficult to compare these methods since they adopt
different model assumptions. Currently most evaluations
are based on some particular realization of data. We
propose to use the (Bayesian) Cram{\'e}r--Rao Bound
(CRB) as a benchmark on these estimators. We also
derive the maximum likelihood estimator (MLE) for
certain models. With coupled mean and variance, our
simulations show that the least squares (LS) estimator
reaches the CRB asymptotically, while the MLEs are
difficult to calculate when the dimension is high.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krioukov:2009:GFS,
author = "Dmitri Krioukov and Fragkiskos Papadopoulos and
Mari{\'a}n Bogu{\~n}{\'a} and Amin Vahdat",
title = "Greedy forwarding in scale-free networks embedded in
hyperbolic metric spaces",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "15--17",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cho:2009:BTB,
author = "Jeong-woo Cho and Yuming Jiang",
title = "Basic theorems on the backoff process in 802.11",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "18--20",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nair:2009:OJF,
author = "Jayakrishnan Nair and Steven H. Low",
title = "Optimal job fragmentation",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "21--23",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yao:2009:EAL,
author = "Erlin Yao and Yungang Bao and Guangming Tan and Mingyu
Chen",
title = "Extending {Amdahl's Law} in the multicore era",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "24--26",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Korzun:2009:LEM,
author = "Dmitry Korzun and Andrei Gurtov",
title = "A local equilibrium model for {P2P} resource ranking",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "27--29",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many Peer-to-Peer (P2P) systems rely on cooperation
among nodes that should be supported with incentives.
Introducing ranks into P2P designs could reward
cooperating nodes and increase overall system
performance. In this paper, we consider the problem of
P2P ranking. In a P2P resource sharing system (RSS),
the ranks allow a node to decide which sources to keep
locally, which external resources to download and
through which nodes, what control to apply for transit
resource requests, and how much quality of service
(QoS) to provide. We introduce a mathematical model for
local P2P resource ranking that optimizes these
decisions. Complete proofs can be found in our
technical report.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasche:2009:MCAa,
author = "Daniel Sadoc Menasch{\'e} and Antonio A. Arag{\~a}o
Rocha and Edmundo {de Souza e Silva} and Rosa M. Meri
Le{\~a}o and Don Towsley and Arun Venkataramani",
title = "Modeling chunk availability in {P2P} swarming
systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "30--32",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hohlfeld:2009:VIV,
author = "Oliver Hohlfeld and Florin Ciucu",
title = "Viewing impaired video transmissions from a modeling
perspective",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "33--35",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2009:WOS,
author = "Gagan R. Gupta and Sujay Sanghavi and Ness B. Shroff",
title = "Workload optimality in switches without arrivals",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "36--38",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639575",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze a switch with cross-bar constraints.
Beginning with an initial loading and no further
arrivals, we provide necessary conditions for a
scheduling policy to minimize the workload at all
times. We show that these conditions are sufficient for
a switch of size $N \times 3$ or smaller. We then consider a
weaker notion of optimality: cumulative average
workload optimality. Using a counterexample for a
$7 \times 7$ switch, we show that it is not possible to
approximate the cumulative average workload within
$(1 + 4/475)$ of the optimal at all times. We conjecture
that the workload under the MVM policy is within twice
of the optimal at all times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Andrew:2009:OSS,
author = "Lachlan L. H. Andrew and Adam Wierman and Ao Tang",
title = "Optimal speed scaling under arbitrary power
functions",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "39--41",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639576",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates the performance of online
dynamic speed scaling algorithms for the objective of
minimizing a linear combination of energy and response
time. We prove that (SRPT, $P^{-1}(n)$), which uses
Shortest Remaining Processing Time
(SRPT) scheduling and processes at speed such that the
power used is equal to the queue length, is
2-competitive for a very wide class of power-speed
tradeoff functions. Further, we prove that there exist
tradeoff functions such that no online algorithm can
attain a competitive ratio less than 2.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
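%% Note: the abstract above states the speed-scaling rule only in words. As a
%% minimal illustrative sketch (not the authors' code), assuming a polynomial
%% power function P(s) = s^alpha, running at the speed whose power consumption
%% equals the current queue length n gives s(n) = P^{-1}(n) = n^{1/alpha}:
%%
%%   # hypothetical Python sketch of the speed rule in (SRPT, P^{-1}(n))
%%   def speed_for_queue_length(n, alpha=2.0):
%%       """Speed s chosen so that power P(s) = s**alpha equals queue length n."""
%%       return n ** (1.0 / alpha)
%%
%% The SRPT half of the policy then serves, at that speed, whichever queued job
%% has the shortest remaining processing time.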
@Article{Verloop:2009:HTA,
author = "I. M. Verloop and U. Ayesta and R.
N{\'u}{\~n}ez-Queija",
title = "Heavy-traffic analysis of the {M\slash PH\slash 1}
discriminatory processor sharing queue with
phase-dependent weights",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "42--44",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze a generalization of the Discriminatory
Processor Sharing (DPS) queue in a heavy-traffic
setting. Customers present in the system are served
simultaneously at rates controlled by a vector of
weights. We assume phase-type distributed service
requirements and allow customers to have different
weights in various phases of their service. We
establish a state-space collapse for the queue length
vector in heavy traffic. The result shows that in the
limit, the queue length vector is the product of an
exponentially distributed random variable and a
deterministic vector. This generalizes a previous
result by [2] who considered a DPS queue with
exponentially distributed service requirements. We
finally discuss some implications for residual service
requirements and monotonicity properties in the
ordinary DPS model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
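%% Note: the state-space collapse quoted above says that, in heavy traffic, the
%% queue-length vector becomes an exponentially distributed scalar times a
%% deterministic vector. As a hedged restatement in LaTeX notation (the scaling
%% factor $(1 - \rho)$ and the rate $\mu$ are assumptions of this sketch, not
%% taken from the paper):
%%
%%   (1 - \rho)\,(Q_1, \ldots, Q_K) \;\xrightarrow{d}\; X \cdot (v_1, \ldots, v_K),
%%   \qquad X \sim \mathrm{Exp}(\mu),
%%
%% where $(v_1, \ldots, v_K)$ is a deterministic vector determined by the
%% weights and the phase-type service requirements.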
@Article{Anselmi:2009:IAS,
author = "J. Anselmi and Y. Lu and M. Sharma and M. S.
Squillante",
title = "Improved Approximations for Stochastic Loss Networks",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "45--47",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639578",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weingartner:2009:TAI,
author = "Elias Weing{\"a}rtner and Florian Schmidt and Tobias
Heer and Klaus Wehrle",
title = "Time accurate integration of software prototypes with
event-based network simulations",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "49--50",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639580",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of network emulation brings together the
flexibility of network simulations and the accuracy of
real-world prototype implementations. However, this
approach suffers from the fundamental problem of
simulation overload which occurs if the simulation is
not able to execute in real-time. We tackle this
problem with a concept we call Synchronized Network
Emulation. It enables the time-accurate integration of
implementations with network simulations of any
complexity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2009:ETC,
author = "Haifeng Chen and Wenxuan Zhang and Guofei Jiang",
title = "Experience transfer for the configuration tuning in
large scale computing systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "51--52",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639581",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "configuration tuning; distributed systems; knowledge
acquisition; knowledge reuse",
}
@Article{Lin:2009:RID,
author = "Bill Lin and Jun (Jim) Xu and Nan Hua and Hao Wang and
Haiquan (Chuck) Zhao",
title = "A randomized interleaved {DRAM} architecture for the
maintenance of exact statistics counters",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "53--54",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639582",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We extend a previously proposed randomized interleaved
DRAM architecture [1] that can maintain wirespeed
updates (say 40 Gb/s) to a large array (say millions)
of counters. It works by interleaving updates to
randomly distributed counters across multiple memory
banks. Though unlikely, an adversary can conceivably
overload a memory bank by triggering frequent updates
to the same counter. In this work, we show this
`attack' can be mitigated through caching pending
updates, which can catch repeated updates to the same
counter within a sliding time window. While this
architecture of combining randomization with caching is
simple and straightforward, the primary contribution of
this work is to rigorously prove that it can handle
with overwhelming probability all adversarial update
patterns, using a combination of tail bound techniques,
convex ordering theory, and queueing analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "convex ordering; statistics counters; tail bound",
}
@Article{Zhao:2009:MPA,
author = "Bridge Zhao and y. K. Li and John C. S. Lui and
Dah-Ming Chiu",
title = "On modeling product advertisement in social networks",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "55--56",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639583",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advertising via social networks is receiving more
attention these days. Given a fixed investment (e.g.,
free samples), a company needs to find out the final
probability that users will purchase the product. In
this paper we characterize and model various influence
mechanisms that govern the word-of-mouth spread of
advertisements in large social networks. We use the
local mean field (LMF) technique to analyze large scale
networks wherein states of nodes can be changed by
various influence mechanisms. Extensive simulations are
carried out to validate the accuracy of our model, and
the results also provide insights on designing
advertising strategies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "economics; epidemics; influence model; local mean
field",
}
@Article{Zahn:2009:ESF,
author = "Thomas Zahn and Greg O'Shea and Antony Rowstron",
title = "An empirical study of flooding in mesh networks",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "57--58",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639584",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Triukose:2009:CDN,
author = "Sipat Triukose and Zhihua Wen and Michael Rabinovich",
title = "Content delivery networks: how big is big enough?",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "59--60",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639585",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The central question addressed in this paper is
whether a content delivery network (CDN) needs to deploy
its servers in a large number of locations to achieve
its current levels of performance. Our study indicates
that a relatively small number of consolidated data
centers might provide similar performance to end-users.
\ldots{} or over 30\% of the total 34,000 servers claimed by
Akamai during the study period, were pingable.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2009:SFM,
author = "Zhibin Yu and Hai Jin",
title = "Simple and fast micro-architecture simulation: a
trisection {Cantor} fractal approach",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "61--62",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639586",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the prohibitively long time when detailedly
simulating a realistic benchmark to its completion,
sampling is frequently used to reduce the simulation
time. However, it may often require profiling or
iterative simulations to determine the sampling
parameters. This paper employs the generation procedure
of trisection Cantor set, one classic fractal, to
select instructions simulated in detail as an approach
to enable a simple and fast micro-architecture
simulation. Six randomly selected benchmarks from SPEC
CPU2000 are tested on the simulator, CantorSim, which
implements the trisection Cantor fractal approach. The
results show that it is very easy to use this approach
and it can achieve actual average acceleration of
23.4\% over SMARTS [3] while the accuracy only reduces
marginally. CantorSim's accuracy is validated against
sim-outorder and is accurate within a 3.2\% error
margin. Similar CPI relative errors with the same
parameter values of experiments on simulators of
different processor models indicate that this approach
is micro-architecture independent and can be applied to
accurately predict the performance of new micro-architecture
designs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cantor set; cycle-accurate simulation; fractal
geometry; micro-architecture simulator; performance
evaluation",
}
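%% Note: the abstract above selects the regions of an instruction stream to
%% simulate in detail via the trisection Cantor-set generation procedure. A
%% minimal sketch of that construction (an interpretation of the abstract, not
%% CantorSim itself): repeatedly keep the two outer thirds of an interval and
%% drop the middle third, then simulate the surviving instruction ranges in
%% detail and fast-forward the rest.
%%
%%   # hypothetical Python sketch: instruction ranges kept after `depth` steps
%%   def cantor_intervals(start, end, depth):
%%       if depth == 0 or end - start < 3:
%%           return [(start, end)]
%%       third = (end - start) // 3
%%       return (cantor_intervals(start, start + third, depth - 1) +
%%               cantor_intervals(end - third, end, depth - 1))
%%
%%   # e.g. cantor_intervals(0, 3**6, 2) yields four ranges covering 4/9 of the stream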
@Article{Key:2009:RGE,
author = "Peter Key and Alexandre Proutiere",
title = "Routing games with elastic traffic",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "63--64",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639587",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we introduce and investigate a novel
class of multipath routing games with elastic traffic.
Users open one or more connections along different
feasible paths from source to destination and act
selfishly--seeking to transfer data as fast as
possible. Users only control their routing choices,
and once these choices have been made, the connection
rates are elastic and determined via congestion control
algorithms (e.g. TCP) which ultimately maximize a
certain notion of the network utility. We analyze the
existence and the performance of the Nash Equilibria
(NEs) of the resulting routing games.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lange:2009:ESI,
author = "John R. Lange and J. Scott Miller and Peter A. Dinda",
title = "{EmNet}: satisfying the individual user through
empathic home networks: summary",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "65--66",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639588",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "empathic systems; human factors; networks",
}
@Article{Riska:2009:EDL,
author = "Alma Riska and Erik Riedel",
title = "Evaluation of disk-level workloads at different time
scales",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "67--68",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639589",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reddy:2009:MDC,
author = "Vinith Reddy and Younghoon Kim and Srinivas Shakkottai
and A. L. Narasimha Reddy",
title = "{MultiTrack}: a delay and cost aware {P2P} overlay
architecture",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "69--70",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639590",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borst:2009:SOA,
author = "Sem Borst and Varun Gupta and Anwar Walid",
title = "Self-organizing algorithms for cache cooperation in
content distribution networks",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "71--72",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639591",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rubinstein:2009:SPA,
author = "Benjamin I. P. Rubinstein and Blaine Nelson and Ling
Huang and Anthony D. Joseph and Shing-hon Lau and
Satish Rao and Nina Taft and J. D. Tygar",
title = "Stealthy poisoning attacks on {PCA}-based anomaly
detectors",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "73--74",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639592",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider systems that use PCA-based detectors
obtained from a comprehensive view of the network's
traffic to identify anomalies in backbone networks. To
assess these detectors' susceptibility to adversaries
wishing to evade detection, we present and evaluate
short-term and long-term data poisoning schemes that
trade off between poisoning duration and the volume of
traffic injected for poisoning. Stealthy Boiling Frog
attacks significantly reduce chaff volume, while only
moderately increasing poisoning duration. ROC curves
provide a comprehensive analysis of PCA-based detection
on contaminated data, and show that even small attacks
can undermine this otherwise successful anomaly
detector.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adversarial learning; network traffic analysis;
principal components analysis",
}
@Article{Down:2009:SDR,
author = "Douglas G. Down and H. Christian Gromoll and Amber L.
Puha",
title = "State-dependent response times via fluid limits in
shortest remaining processing time queues",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "75--76",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639593",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a single server queue with renewal
arrivals and i.i.d. service times, in which the server
employs the Shortest Remaining Processing Time (SRPT)
policy. We provide a fluid model (or formal law of
large numbers approximation) for this system. The
foremost payoff of our fluid model is a fluid level
approximation for the state-dependent response time of
a job of arbitrary size, that is, the amount of time it
spends in the system, given an arbitrary system
configuration at the time of its arrival.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2009:SPP,
author = "Jianwei Chen and Murali Annavaram and Michel Dubois",
title = "{SlackSim}: a platform for parallel simulations of
{CMPs} on {CMPs}",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "77--78",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639594",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel simulation is a technique to accelerate
microarchitecture simulation of target CMPs by
exploiting the inherent parallelism of host CMPs. In
this paper, we explore the simulation paradigm of
simulating each core of a target CMP in one thread and
the spreading the threads across the hardware thread
contexts of a host CMP. We introduce the concept of
slack simulation where the Pthreads simulating
different target cores do not synchronize after each
simulated cycle, but rather they are given some slack.
The slack is the difference in cycles between the
simulated times of any two target cores. Small
slacks,such as a few cycles, greatly improve the
efficiency of parallel CMP simulations, with no or
negligible simulation error. We have developed a
simulation framework called SlackSim to experiment with
various slack simulation schemes. Unlike previous
attempts to parallelize multiprocessor simulations on
distributed memory machines, SlackSim takes advantage
of the efficient sharing of data in the host CMP
architecture. We demonstrate the efficiency and
accuracy of some well-known slack simulation schemes
and of some new ones on SlackSim running on a
state-of-the-art CMP platform.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gulati:2009:EAP,
author = "Ajay Gulati and Arif Merchant and Mustafa Uysal and
Pradeep Padala and Peter Varman",
title = "Efficient and adaptive proportional share {I/O}
scheduling",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "79--80",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639595",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2009:DDS,
author = "Yang Liu and Linfeng Zhang and Yong Guan",
title = "A distributed data streaming algorithm for
network-wide traffic anomaly detection",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "81--82",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639596",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Nowadays, the Internet has serious security problems and
network failures that are hard to resolve, for example,
botnet attacks, polymorphic worm\slash virus spreading,
DDoS, and flash crowds. To address many of these
problems, we need to have a network-wide view of the
traffic dynamics, and more importantly, be able to
detect traffic anomalies in a timely manner. To our
knowledge, Principal Component Analysis (PCA) is the
best-known spatial detection method for the
network-wide traffic anomaly. However, existing
PCA-based solutions have scalability problems in that
they require $ O(m^2 n) $ running time and $ O(m n) $
space to analyze traffic measurements from $m$
aggregated traffic flows within a sliding window of
length $n$. We propose a novel data streaming algorithm
for PCA-based network-wide traffic anomaly detection in
a distributed fashion. Our algorithm can achieve $ O(w
n \log n)$ running time and $ O(w n)$ space at local
monitors, and $ O(m^2 \log n)$ running time and $ O(m
\log n)$ space at Network Operation Center (NOC), where
$w$ denotes the maximum number of traffic flows at a
local monitor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baccelli:2009:TMA,
author = "Fran{\c{c}}ois Baccelli and Bruno Kauffmann and Darryl
Veitch",
title = "Towards multihop available bandwidth estimation",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "83--84",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639597",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We evaluate the algorithm proposed in [1], which
estimates the residual bandwidth on each hop of an
Internet path using a parametric model which consists
of a Kelly queueing network. The evaluation is driven
by simulation based on real network traces over a
two-node path. Correction factors are proposed and
evaluated to cope with deviations from model
assumptions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nandi:2009:WMU,
author = "Animesh Nandi and Bobby Bhattacharjee and Peter
Druschel",
title = "What a mesh: understanding the design tradeoffs for
streaming multicast",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "85--86",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639598",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cooperative end-system multicast (CEM) is a promising
paradigm for Internet video distribution. Several CEM
systems have been proposed and deployed, but the
tradeoffs inherent in the different designs are not
well understood. In this work, we provide a common
framework in which different CEM design choices can be
empirically and systematically evaluated. Based on our
results, we conjecture that all CEM systems must abide
by a set of fundamental design constraints, which we
express in a simple model. By necessity, existing
system implementations couple the data- and
control-planes and often use different transport
protocols.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasche:2009:MCAb,
author = "Daniel Sadoc Menasche and Antonio A. Aragao Rocha and
Bin Li and Don Towsley and Arun Venkataramani",
title = "Modeling content availability in peer-to-peer swarming
systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "87--88",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639599",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Iyer:2009:VPA,
author = "Ravi Iyer and Ramesh Illikkal and Li Zhao and Don
Newell and Jaideep Moses",
title = "Virtual platform architectures for resource metering
in datacenters",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "89--90",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639600",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With cloud and utility computing models gaining
significant momentum, data centers are increasingly
employing virtualization and consolidation as a means
to support a large number of disparate applications
running simultaneously on a CMP server. In such
environments, it is important to meter the usage of
resources by each datacenter application so that
customers can be charged accordingly. In this paper, we
describe a simple metering and chargeback model
(pay-as-you-go) and describe a solution based on
virtual platform architectures (VPA) to accurately
meter visible as well as transparent resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Cache/Memory; CMP; performance; quality of service;
resource sharing principles; service level agreements",
}
@Article{Kant:2009:CDE,
author = "Krishna Kant",
title = "Challenges in distributed energy adaptive computing",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "3--7",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710117",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fueled by burgeoning online services, energy
consumption in information technology (IT) equipment is
becoming a major concern from a variety of perspectives
including the continuation of Moore's Law for hardware
design, enabling sophisticated mobile client
functionality, mounting utility costs in data centers,
and increasing CO$_2$ emissions associated with IT
manufacturing, distribution, usage and disposal. This
article discusses an approach where energy consumption
and related issues of heat dissipation and
sustainability are considered as the primary concerns
that drive the way computation and communication is
organized at both clients and servers. This article
describes the challenges in supporting such a
distributed energy adaptive computing paradigm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pan:2009:GBB,
author = "Xinghao Pan and Jiaqi Tan and Soila Kavulya and Rajeev
Gandhi and Priya Narasimhan",
title = "{Ganesha}: black-box diagnosis of {MapReduce}
systems",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "8--13",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710118",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ganesha aims to diagnose faults transparently (in a
black-box manner) in MapReduce systems, by analyzing
OS-level metrics. Ganesha's approach is based on
peer-symmetry under fault-free conditions, and can
diagnose faults that manifest asymmetrically at nodes
within a MapReduce system. We evaluate Ganesha by
diagnosing Hadoop problems for the Gridmix Hadoop
benchmark on 10-node and 50-node MapReduce clusters on
Amazon's EC2. We also candidly highlight faults that
escape Ganesha's diagnosis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anand:2009:NNN,
author = "Ashok Anand and Aditya Akella",
title = "{NetReplay}: a new network primitive",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "14--19",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710119",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe NetReplay, a new network
primitive to help application end points conduct
in-band characterization of the glitches they
encountered. In NetReplay, each network infrastructure
element remembers a small amount of information for
every packet observed at the element over a certain
time interval. Furthermore, network elements expose a
simple 'packet marking' interface, using which they can
indicate to end-points whether or not they had seen a
particular packet in the past. When application
end-points observe glitches, they replay (i.e.
retransmit) the packets which observed the glitch and
leverage feedback from network elements to determine
the type and location of the glitch encountered by the
packets. We discuss how end-host network stacks should
be modified to leverage NetReplay in this fashion. We
also consider how network infrastructure can support
NetReplay in a low-overhead fashion.\par
We argue that NetReplay can enable applications to
detect a variety of glitches and react to them in an
accurate and informed manner, while ensuring that the
infrastructure stays simple and fast. We believe that
proactive support from the network in the form of
NetReplay-like functionality is crucial to ensure
robust performance of future Internet applications,
many of which are likely to be highly demanding and far
less tolerant of network glitches than traditional
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crocey:2009:QBE,
author = "Daniele Croce and Marco Mellia and Emilio
Leonardi",
title = "The quest for bandwidth estimation techniques for
large-scale distributed systems",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "20--25",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710120",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years the research community has developed
many techniques to estimate the end-to-end available
bandwidth of an Internet path. This important metric
can be potentially exploited to optimize the
performance of several distributed systems and, even,
to improve the effectiveness of the congestion control
mechanism of TCP. Thus, it has been suggested that some
existing estimation techniques could be used for this
purpose. However, existing tools were not designed for
large-scale deployments and were mostly validated in
controlled settings, considering only one measurement
running at a time. In this paper, we argue that current
tools, while offering good estimates when used alone,
might not work in large-scale systems where several
estimations severely interfere with each other. We
analyze the properties of the measurement paradigms
employed today and discuss their functioning, study
their overhead and analyze their interference. Our
testbed results show that current techniques are
insufficient as they are. Finally, we will discuss and
propose some principles that should be taken into
account for including available bandwidth measurements
in large-scale distributed systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keeton:2009:DYK,
author = "Kimberly Keeton and Pankaj Mehra and John Wilkes",
title = "Do you know your {IQ?}: a research agenda for
information quality in systems",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "26--31",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710121",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Information quality (IQ) is a measure of how fit
information is for a purpose. Sometimes called Quality
of Information (QoI) by analogy with Quality of Service
(QoS), it quantifies whether the correct information is
being used to make a decision or take an action. Not
understanding when information is of adequate quality
can lead to bad decisions and catastrophic effects,
including system outages, increased costs, lost revenue
-- and worse. Quantifying information quality can help
improve decision making, but the ultimate goal should
be to select or construct information producers that
have the appropriate balance between information
quality and the cost of providing it. In this paper, we
provide a brief introduction to the field, argue the
case for applying information quality metrics in the
systems domain, and propose a research agenda to
explore this space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data quality; goal-directed design; information
processing pipeline; information quality; IQ; modeling;
prediction; QoI; uncertainty",
}
@Article{Casale:2009:AGB,
author = "Giuliano Casale and Amir Kalbasi and Diwakar
Krishnamurthy and Jerry Rolia",
title = "Automatically generating bursty benchmarks for
multitier systems",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "32--37",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710122",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Burstiness in resource consumption of requests has
been recently observed to be a fundamental performance
driver for multi-tier applications. This motivates the
need for a methodology to create benchmarks with
controlled burstiness that helps to improve the
effectiveness of system sizing efforts and makes
application testing more comprehensive. We tackle this
problem using a model-based technique for the automatic
and controlled generation of bursty benchmarks.
Phase-type models are constructed in an automated
manner to model the distribution of service demands
placed by user sessions on various system resources.
The models are then used to derive session submission
policies that result in user-specified levels of
service demand burstiness for resources at the
different tiers in a system. A case study using a
three-tier TPC-W testbed shows that our method is able
to control and predict burstiness for session service
demands and to cause dramatic latency and throughput
degradations that are not visible with the same session
mix and no burstiness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerstein:2009:ACT,
author = "Joseph L. Hellerstein and Vance Morrison and Eric
Eilebrecht",
title = "Applying control theory in the real world: experience
with building a controller for the {.NET} thread pool",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "38--42",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710123",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has been considerable interest in using control
theory to build web servers, database managers, and
other systems. We claim that the potential value of
using control theory cannot be realized in practice
without a methodology that addresses controller design,
testing, and tuning. Based on our experience with
building a controller for the .NET thread pool, we
develop a methodology that: (a) designs for
extensibility to integrate diverse control techniques,
(b) scales the test infrastructure to enable running a
large number of test cases, (c) constructs test cases
for which the ideal controller performance is known a
priori so that the outcomes of test cases can be
readily assessed, and (d) tunes controller parameters
to achieve good results for multiple performance
metrics. We conclude by discussing how our methodology
can be extended, especially to designing controllers
for distributed systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riska:2009:FRE,
author = "Alma Riska and Ningfang Mi and Evgenia Smirni and
Giuliano Casale",
title = "Feasibility regions: exploiting tradeoffs between
power and performance in disk drives",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "43--48",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710124",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Low utilization immediately suggests that placing the
system into a low power mode during idle times may
considerably decrease power consumption. As future
workload remains largely unknown, 'when' to initiate a
power saving mode and for 'how long' to stay in this
mode remains a challenging open problem, given that
performance degradation of future jobs should not be
compromised. We present a model and an algorithm that
manages to successfully explore feasible regions of
power and performance, and expose the system
limitations according to both measures. Extensive
analysis on a set of enterprise storage traces shows
the algorithm's robustness for successfully identifying
'when' and for 'how long' one should activate a power
saving mode given a set of power/performance targets
that are provided by the user.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Willinger:2009:ROS,
author = "Walter Willinger and Reza Rejaie and Mojtaba Torkjazi
and Masoud Valafar and Mauro Maggioni",
title = "Research on online social networks: time to face the
real challenges",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "49--54",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710125",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Online Social Networks (OSNs) provide a unique
opportunity for researchers to study how a combination
of technological, economical, and social forces have
been conspiring to provide a service that has attracted
the largest user population in the history of the
Internet. With more than half a billion users and
counting, OSNs have the potential to impact almost
every aspect of networking, including measurement and
performance modeling and analysis, network architecture
and system design, and privacy and user behavior, to
name just a few. However, much of the existing OSN
research literature seems to have lost sight of this
unique opportunity and has avoided dealing with the new
challenges posed by OSNs. We argue in this position
paper that it is high time for OSN researchers to
exploit and face these challenges to provide a basic
understanding of the OSN ecosystem as a whole. Such an
understanding has to reflect the key role users play in
this system and must focus on the system's dynamics,
purpose and functionality when trying to illuminate the
main technological, economic, and social forces at work
in the current OSN revolution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tickoo:2009:MVM,
author = "Omesh Tickoo and Ravi Iyer and Ramesh Illikkal and Don
Newell",
title = "Modeling virtual machine performance: challenges and
approaches",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "55--60",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710126",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Data centers are increasingly employing virtualization
and consolidation as a means to support a large number
of disparate applications running simultaneously on
server platforms. However, server platforms are still
being designed and evaluated based on performance
modeling of a single highly parallel application or a
set of homogeneous workloads running simultaneously.
Since most future datacenters are expected to employ
server virtualization, this paper takes a look at the
challenges of modeling virtual machine (VM) performance
on a datacenter server. Based on vConsolidate (a server
virtualization benchmark) and latest multi-core
servers, we show that the VM modeling challenge
requires addressing three key problems: (a) modeling
the contention of visible resources (cores, memory
capacity, I/O devices, etc), (b) modeling the
contention of invisible resources (shared
microarchitecture resources, shared cache, shared
memory bandwidth, etc) and (c) modeling overheads of
virtual machine monitor (or hypervisor) implementation.
We take a first step to addressing this problem by
describing a VM performance modeling approach and
performing a detailed case study based on the
vConsolidate benchmark. We conclude by outlining
outstanding problems for future work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CMP; consolidation; measurement; modeling; performance
analysis; servers; virtualization",
}
@Article{Gulati:2009:MWD,
author = "Ajay Gulati and Chethan Kumar and Irfan Ahmad",
title = "Modeling workloads and devices for {IO} load balancing
in virtualized environments",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "61--66",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710127",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Virtualization has been effective in providing
performance isolation and proportional allocation of
resources, such as CPU and memory between VMs by using
automated distributed resource schedulers and VM
migration. Storage VMotion allows users to migrate
virtual hard disks from one data store to another
without stopping the virtual machine. There is a dire
need for an automated tool to manage storage resources
more effectively by doing virtual disk placement and
load balancing of workloads across multiple data
stores. Applicable beyond virtualization, this problem
is challenging because it requires both modeling
workloads and characterizing underlying devices.
Furthermore, device characteristics such as number of
disks backing a LUN, disk types etc. are hidden from
the hosts by the virtualization layer at the array. In
this paper, we propose a storage resource scheduler
(SRS) to manage virtual disk placement and automatic
load balancing using Storage VMotion. Our initial
results lead us to believe that we can effectively
model workloads and devices to improve overall storage
resource utilization in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fay:2009:WSM,
author = "Damien Fay and Hamed Haddadi and Andrew W. Moore and
Richard Mortier and Steve Uhlig and Almerima
Jamakovic",
title = "A weighted spectrum metric for comparison of
{Internet} topologies",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "67--72",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710129",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Comparison of graph structures is a frequently
encountered problem across a number of problem domains.
Comparing graphs requires a metric to discriminate
which features of the graphs are considered important.
The spectrum of a graph is often claimed to contain all
the information within a graph, but the raw spectrum
contains too much information to be directly used as a
useful metric. In this paper we introduce a metric, the
weighted spectral distribution, that improves on the
raw spectrum by discounting those eigenvalues believed
to be unimportant and emphasizing the contribution of
those believed to be important.\par
We use this metric to optimize the selection of
parameter values for generating Internet topologies.
Our metric leads to parameter choices that appear
sensible given prior knowledge of the problem domain:
the resulting choices are close to the default values
of the topology generators and, in the case of some
generators, fall within the expected region. This
metric provides a means for meaningfully optimizing
parameter selection when generating topologies intended
to share structure with, but not match exactly,
measured graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Illikkal:2010:PQP,
author = "Ramesh Illikkal and Vineet Chadha and Andrew Herdrich
and Ravi Iyer and Donald Newell",
title = "{PIRATE}: {QoS} and performance management in {CMP}
architectures",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "3--10",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773396",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As new multi-threaded usage models such as
virtualization and consolidation take advantage of
multiple cores in CMP architectures, the impact of
shared resource contention between VMs and user-level
applications introduces Quality of Service (QoS)
concerns and challenges. QoS-aware management of these
shared platform resources is therefore becoming
increasingly important. Various QoS schemes for
resource management have been recently proposed, but
most of these prior efforts have been focused on
controlling individual resource allocation based on
priority information passed down from the OS or
Hypervisor to system resources. The complexity of this
approach increases when multiple levels of resources
are associated with an application's performance and
power consumption. In this paper we employ simpler
rate-based QoS mechanisms which control the execution
rate of competing applications. To enable
differentiation between simultaneously running
applications' performance and power consumption, these
rate mechanisms need to dynamically adjust the
execution of each application. Our proposed PI-RATE
architecture introduces a control-theoretic approach to
dynamically adjust the execution rate of each
application based on the QoS target and monitored
resource utilization. We evaluate three modes of
PI-RATE architecture --- cache QoS targets, performance
QoS targets and power QoS targets --- to show that the
PI-RATE architecture is flexible and effective at
enabling QoS in a CMP platform.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clock modulation; frequency scaling; integral
controller; proportional",
}
@Article{Dube:2010:PLL,
author = "Parijat Dube and Li Zhang and David Daly and Alan
Bivens",
title = "Performance of large low-associativity caches",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "11--18",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773397",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While it is known that lowering the associativity of
caches degrades cache performance, little is understood
about the degree of this effect or how to lessen the
effect, especially in very large caches. Most existing
works on cache performance are simulation or emulation
based and there is a lack of analytical models
characterizing performance in terms of different
configuration parameters such as line size, cache size,
associativity and workload specific parameters. We
develop analytical models to study performance of large
cache architectures by capturing the dependence of miss
ratio on associativity and other configuration
parameters. While high associativity may decrease cache
misses, for very large caches the corresponding
increase in hardware cost and power may be significant.
We use our models as well as simulation to study
different proposals for reducing misses in low
associativity caches, specifically, address space
randomization and victim caches. Our analysis provides
specific detail on the impact of these proposals, and a
clearer understanding of why they do or do not work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "address randomization; associativity; modeling; victim
cache",
}
@Article{Zhu:2010:ROW,
author = "Yaping Zhu and Jennifer Rexford and Subhabrata Sen and
Aman Shaikh",
title = "{Route Oracle}: where have all the packets gone?",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "19--25",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many network-management problems in large backbone
networks need the answer to a seemingly simple
question: where does a given IP packet, entering the
network at a particular place and time, leave the
network to continue on its path to the destination?
Answering this question at scale and in real time is
challenging for several reasons: (i) a destination IP
address could match several IP prefixes, (ii) the
longest-matching prefix may change over time, (iii) the
number of IP prefixes and routing protocol messages is
very large, and (iv) network-management applications
often require answers to this question for a large
number of destination IP addresses in real time. In
this paper, we present an efficient algorithm for
tracking prefix-match changes for ranges of IP
addresses. We then present the design, implementation,
and evaluation of the Route Oracle tool that answers
queries about routing changes on behalf of network
management applications. Our design of Route Oracle
includes several performance optimizations, such as
pre-processing of BGP update messages, and
parallelization of query processing. Experiments with
BGP measurement data from a large ISP backbone
demonstrate that our system answers queries in real
time and at scale.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Doebel:2010:TVP,
author = "Bjoern Doebel and Peter Nobel and Eno Thereska and
Alice Zheng",
title = "Towards versatile performance models for complex,
popular applications",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "26--33",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773399",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Perhaps surprisingly, no practical performance models
exist for popular (and complex) client applications
such as Adobe's Creative Suite, Microsoft's Office
suite and Visual Studio, Mozilla, Halo 3, etc. There is
currently no tool that automatically answers program
developers', IT administrators' and end-users' simple
what-if questions like 'what happens to the performance
of my favorite application X if I upgrade from Windows
Vista to Windows 7?'. This paper describes directions
we are taking for constructing practical, versatile
performance models to address this problem.\par
The directions we have taken have two paths. The first
path involves instrumenting applications better to
export their state and associated metrics. This
application-specific monitoring is always on and
interesting data is collected from real, 'in-the-wild'
deployments. The second path involves statistical
modeling techniques. The models we are experimenting
with require no modifications to the OS or applications
beyond the above instrumentation, and no explicit {\em
a priori\/} model on how an OS or application should
behave. We are in the process of learning from models
we have constructed for several Microsoft products,
including the Office suite, Visual Studio and Media
Player. This paper presents preliminary findings from a
large user deployment (several hundred thousand user
sessions) of these applications that show the coverage
and limitations of such models.\par
Early indications from this work point towards future
modeling strategies based on large amounts of data
collected in the field. We present our thoughts on what
this could imply for the SIGMETRICS community.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mishra:2010:TCC,
author = "Asit K. Mishra and Joseph L. Hellerstein and Walfredo
Cirne and Chita R. Das",
title = "Towards characterizing cloud backend workloads:
insights from {Google} compute clusters",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "34--41",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773400",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The advent of cloud computing promises highly
available, efficient, and flexible computing services
for applications such as web search, email, voice over
IP, and web search alerts. Our experience at Google is
that realizing the promises of cloud computing requires
an extremely scalable backend consisting of many large
compute clusters that are shared by application tasks
with diverse service level requirements for throughput,
latency, and jitter. These considerations impact (a)
capacity planning to determine which machine resources
must grow and by how much and (b) task scheduling to
achieve high machine utilization and to meet service
level objectives.\par
Both capacity planning and task scheduling require a
good understanding of task resource consumption (e.g.,
CPU and memory usage). This in turn demands simple and
accurate approaches to workload
classification --- determining how to form groups of tasks
(workloads) with similar resource demands. One approach
to workload classification is to make each task its own
workload. However, this approach scales poorly since
tens of thousands of tasks execute daily on Google
compute clusters. Another approach to workload
classification is to view all tasks as belonging to a
single workload. Unfortunately, applying such a
coarse-grain workload classification to the diversity
of tasks running on Google compute clusters results in
large variances in predicted resource
consumptions.\par
This paper describes an approach to workload
classification and its application to the Google Cloud
Backend, arguably the largest cloud backend on the
planet. Our methodology for workload classification
consists of: (1) identifying the workload dimensions;
(2) constructing task classes using an off-the-shelf
algorithm such as k-means; (3) determining the break
points for qualitative coordinates within the workload
dimensions; and (4) merging adjacent task classes to
reduce the number of workloads. We use the foregoing,
especially the notion of qualitative coordinates, to
glean several insights about the Google Cloud Backend:
(a) the duration of task executions is bimodal in that
tasks either have a short duration or a long duration;
(b) most tasks have short durations; and (c) most
resources are consumed by a few tasks with long
duration that have large demands for CPU and memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2010:SIQ,
author = "Martin Arlitt and Keith Farkas and Subu Iyer and
Preethi Kumaresan and Sandro Rafaeli",
title = "Systematically improving the quality of {IT}
utilization data",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "42--49",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Efforts to reduce the cost of ownership for enterprise
IT environments are spurring the development and
deployment of data-driven management tools. Yet, IT
data is imperfect and these imperfections can lead to
inappropriate decisions that have significant technical
and business consequences. In this paper, we begin by
raising awareness of this problem through examples of
the imperfections that occur, and a discussion of their
causes and implications on IT management tasks. We then
introduce a systematic approach for addressing such
imperfections. Our approach allows best practices to be
readily shared, simplifies the construction of IT data
assurance solutions, and allows context-specific
corrections to be applied until the root cause(s) of
the imperfections can be fixed. To demonstrate the
value of our solution, we describe a capacity planning
use case. Application of our solution to an ongoing
capacity planning effort reduced the (human) planner's
time requirements by $\approx 3\times$ to $\approx 6$ hours,
while enabling him to evaluate the data quality of
$\approx 5\times$ more applications and for 9 imperfection
types rather than 1.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hu:2010:PMI,
author = "Jianying Hu and Yingdong Lu and Aleksandra
Mojsilovi{\'c} and Mayank Sharma and Mark S.
Squillante",
title = "Performance management of {IT} services delivery",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "50--57",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773402",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2010:BPI,
author = "Shuyi Chen and Kaustubh R. Joshi and Matti A. Hiltunen
and Richard D. Schlichting and William H. Sanders",
title = "Blackbox prediction of the impact of {DVFS} on
end-to-end performance of multitier systems",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "59--63",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773404",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic voltage and frequency scaling (DVFS) is a
well-known technique for gaining energy savings on
desktop and laptop computers. However, its use in
server settings requires careful consideration of any
potential impacts on end-to-end service performance of
hosted applications. In this paper, we develop a simple
metric called the `frequency gradient' that allows
prediction of the impact of changes in processor
frequency on the end-to-end transaction response times
of multitier applications. We show how frequency
gradients can be measured on a running system in a
push-button manner without any prior knowledge of
application semantics, structure, or configuration
settings. Using experimental results, we demonstrate
that the frequency gradients provide accurate
predictions, and enable end-to-end performance-aware
DVFS for multitier applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marwah:2010:QSI,
author = "Manish Marwah and Paulo Maciel and Amip Shah and
Ratnesh Sharma and Tom Christian and Virgilio Almeida
and Carlos Ara{\'u}jo and Erica Souza and Gustavo
Callou and Bruno Silva and S{\'e}rgio Galdino and Jose
Pires",
title = "Quantifying the sustainability impact of data center
availability",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "64--68",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Data center availability is critical considering the
explosive growth in Internet services and people's
dependence on them. Furthermore, in recent years,
sustainability has become important. However, data
center designers have little information on the
sustainability impact of data center availability
architectures. In this paper, we present an approach to
estimate the sustainability impact of such
architectures. Availability is computed using
Stochastic Petri Net (SPN) models while an energy-based
lifecycle assessment (LCA) approach is used for
quantifying sustainability impact. The approach is
demonstrated on real life data center power
infrastructure architectures. Five different
architectures are considered and initial results show
that quantification of sustainability impact provides
important information to a data center designer in
evaluating availability architecture choices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "availability; data center; life-cycle assessment;
power infrastructure; stochastic Petri net;
sustainability",
}
@Article{Marsan:2010:EEM,
author = "Marco Ajmone Marsan and Michela Meo",
title = "Energy efficient management of two cellular access
networks",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "69--73",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773406",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we evaluate the energy saving that can
be achieved with the energy-aware cooperative
management of the cellular access networks of two
operators offering service over the same area. We
evaluate the amount of energy that can be saved by
using both networks in high traffic conditions, but
switching off one of the two during the periods when
traffic is so low that the desired quality of service
can be obtained with just one network. When one of the
two networks is off, its customers are allowed to roam
over the one that is on. Several alternatives are
studied, as regards the switch-off pattern: the one
that balances the switch-off frequencies, the one that
balances roaming costs, the one that balances energy
savings, and the one that maximizes the amount of saved
energy. Our results indicate that a huge amount of
energy can be saved, and suggest that, to reduce energy
consumption, new cooperative attitudes of the operators
should be encouraged with appropriate incentives, or
even enforced by regulation authorities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsiaflakis:2010:FGD,
author = "Paschalis Tsiaflakis and Yung Yi and Mung Chiang and
Marc Moonen",
title = "Fair greening for {DSL} broadband access",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "74--78",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773407",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Given that broadband access networks are an integral
part of the ICT infrastructure and that DSL is the most
widely deployed broadband access technology, greening
DSL has become important. Our recent work demonstrated
a promising tradeoff between data rate performance and
energy conservation. However, more greening still
implies possibly lower data rate, and allocating this
'price of greening' across interfering users needs to
be fair. This paper proposes four formulations of fair
greening in interference-limited networks, unifies them
into one general representation, and develops a unified
algorithm to solve them effectively. Simulations
quantify the intuitions on fairness in greening DSL, as
these four alternative approaches offer a range of
choices between maintaining a high sum data rate and
enforcing various definitions of fairness. Fairness of
allocating the price of greening is also interesting in
its own right.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ord:2010:PEM,
author = "Jason Ord and Ellen Chappell and Scott Canonico and
Tim Strecker",
title = "Product environmental metrics for printers",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "79--83",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Hewlett--Packard's Imaging \& Printing Group (IPG)
is charting a course towards environmental leadership
in its markets. To do this, IPG must look beyond just
satisfying the regulations and identify opportunities
for groundbreaking improvement. Carefully designed
metrics are necessary to guide design, chart progress
and set goals in this effort. IPG's Environmental
Strategy Team is leading an initiative to establish
these metrics internally. This paper describes the
development process the authors followed to construct
the initial metrics, which are focused on the 'carbon
footprint' of products under development. The paper
also discusses the lessons learned developing the
initial metrics, the results achieved thus far,
implementation details, challenges, and future
opportunities for improvement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "carbon footprint; environmental performance
measurement; environmental product metrics; printers;
printing",
}
@Article{Cayzer:2010:SHI,
author = "Steve Cayzer and Chris Preist",
title = "The sustainability hub: an information management tool
for analysis and decision making",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "84--88",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773409",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sustainability is becoming an increasingly important
driver for which decision makers -- consumers,
corporate and government -- rely on principled,
accurate and provenanced metrics to make appropriate
behavior changes. Our assertion here is that a
Sustainability Hub which manages such metrics together
with their context and chains of reasoning will be of
great benefit to the global community. In this paper we
explain the Hub vision and explain its triple value
proposition of context, chains of reasoning and
community. We propose a data model and describe our
existing prototype.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "chains of reasoning; community; context; information
management; metrics; provenance; sustainability",
}
@Article{Thereska:2010:PPM,
author = "Eno Thereska and Bjoern Doebel and Alice X. Zheng and
Peter Nobel",
title = "Practical performance models for complex, popular
applications",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "1--12",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811041",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Perhaps surprisingly, no practical performance models
exist for popular (and complex) client applications
such as Adobe's Creative Suite, Microsoft's Office and
Visual Studio, Mozilla, Halo 3, etc. There is currently
no tool that automatically answers program developers',
IT administrators' and end-users' simple what-if
questions like 'what happens to the performance of my
favorite application X if I upgrade from Windows Vista
to Windows 7?'. This paper describes our approach
towards constructing practical, versatile performance
models to address this problem. The goal is to have
these models be useful for application developers to
help expand application testing coverage and for IT
administrators to assist with understanding the
performance consequences of a software, hardware or
configuration change.\par
This paper's main contributions are in system building
and performance modeling. We believe we have built
applications that are easier to model because we have
proactively instrumented them to export their state and
associated metrics. This application-specific
monitoring is always on and interesting data is
collected from real, 'in-the-wild' deployments. The
models we are experimenting with are based on
statistical techniques. They require no modifications
to the OS or applications beyond the above
instrumentation, and no explicit a priori model on how
an OS or application should behave. We are in the
process of learning from models we have constructed for
several Microsoft products, including the Office suite,
Visual Studio and Media Player. This paper presents
preliminary findings from a large user deployment
(several hundred thousand user sessions) of these
applications that show the coverage and limitations of
such models. These findings pushed us to move beyond
averages/means and go into some depth into why client
application performance has an inherently large
variance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "developers; IT administrators; performance variance;
what-if",
}
@Article{Gast:2010:MFM,
author = "Nicolas Gast and Bruno Gaujal",
title = "A mean field model of work stealing in large-scale
systems",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "13--24",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811042",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider a generic model of
computational grids, seen as several clusters of
homogeneous processors. In such systems, a key issue
when designing efficient job allocation policies is to
balance the workload over the different
resources.\par
We present a Markovian model for performance evaluation
of such a policy, namely work stealing (idle processors
steal work from others) in large-scale heterogeneous
systems. Using mean field theory, we show that when the
size of the system grows, it converges to a system of
deterministic ordinary differential equations that
allows one to compute the expectation of performance
functions (such as average response times) as well as
the distributions of these functions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "grid computing; load balancing; mean field",
}
@Article{Balsamo:2010:UAP,
author = "Simonetta Balsamo and Peter G. Harrison and Andrea
Marin",
title = "A unifying approach to product-forms in networks with
finite capacity constraints",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "25--36",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811043",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In queueing networks with blocking, stations wishing
to transmit customers to a full queue are blocked and
need to take alternative action on completing a
service. In general, product-forms, i.e. separable
solutions for such a network's equilibrium state
probabilities, do not exist but some product-forms have
been obtained over the years in special cases, using a
variety of techniques. We show that the Reversed
Compound Agent Theorem (RCAT) can obtain these diverse
results in a uniform way by its direct application, so
unifying product-forms in networks with and without
blocking. New product-forms are also constructed for a
type of blocking we call `skipping', where a blocked
station sends its output-customers to the queue after
the one causing the blocking in that customer's path.
Finally, we investigate a novel congestion management
scheme for networks of finite-capacity queues in which
a station with a full queue transmits signals that
delete customers from upstream queues in order to
reduce incoming traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "product-form solutions; queueing theory",
}
@Article{Andrew:2010:OFR,
author = "Lachlan L. H. Andrew and Minghong Lin and Adam
Wierman",
title = "Optimality, fairness, and robustness in speed scaling
designs",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "37--48",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811044",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work examines fundamental tradeoffs incurred by a
speed scaler seeking to minimize the sum of expected
response time and energy use per job. We prove that a
popular speed scaler is 2-competitive for this
objective and no 'natural' speed scaler can do better.
Additionally, we prove that energy-proportional speed
scaling works well for both Shortest Remaining
Processing Time (SRPT) and Processor Sharing (PS) and
we show that under both SRPT and PS, gated-static speed
scaling is nearly optimal when the mean workload is
known, but that dynamic speed scaling provides
robustness against uncertain workloads. Finally, we
prove that speed scaling magnifies unfairness under
SRPT but that PS remains fair under speed scaling.
These results show that these speed scalers can achieve
any two, but only two, of optimality, fairness, and
robustness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy; fairness; PS; robustness; scheduling; speed
scaling; SRPT",
}
@Article{Dong:2010:EEE,
author = "Wei Dong and Yunhao Liu and Xiaofan Wu and Lin Gu and
Chun Chen",
title = "{Elon}: enabling efficient and long-term reprogramming
for wireless sensor networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "49--60",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811046",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a new mechanism called Elon for enabling
efficient and long-term reprogramming in wireless
sensor networks. Elon reduces the transferred code size
significantly by introducing the concept of replaceable
component. It avoids the cost of hardware reboot with a
novel software reboot mechanism. Moreover, it
significantly prolongs the reprogramming lifetime by
avoiding flash writes for TelosB nodes. Experimental
results show that Elon transfers up to 120--389 times
less information than Deluge, and 18--42 times less
information than Stream. The software reboot mechanism
that Elon applies reduces the rebooting cost by
50.4\%--53.87\% in terms of beacon packets, and 56.83\%
in terms of unsynchronized nodes. In addition, Elon
prolongs the reprogramming lifetime by a factor of
2.3.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "component; reboot; reprogramming; wireless sensor
network",
}
@Article{Karbasi:2010:DSN,
author = "Amin Karbasi and Sewoong Oh",
title = "Distributed sensor network localization from local
connectivity: performance analysis for the
{HOP-TERRAIN} algorithm",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "61--70",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811047",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses the problem of determining the
node locations in ad-hoc sensor networks when only
connectivity information is available. In previous
work, we showed that the localization algorithm MDS-MAP
proposed by Y. Shang et al. is able to localize sensors
up to a bounded error decreasing at a rate inversely
proportional to the radio range r. The main limitation
of MDS-MAP is the assumption that the available
connectivity information is processed in a centralized
way.\par
In this work we investigate a practically important
question whether similar performance guarantees can be
obtained in a distributed setting. In particular, we
analyze the performance of the HOP-TERRAIN algorithm
proposed by C. Savarese et al. This algorithm can be
seen as a distributed version of the MDS-MAP algorithm.
More precisely, assume that the radio range r=o(1) and
that the network consists of n sensors positioned
randomly on a d-dimensional unit cube and d+1 anchors
in general positions. We show that when only
connectivity information is available, for every
unknown node i, the Euclidean distance $ || \hat{x}_i -
x_i || $ between the estimate $ \hat{x}_i $ and the
correct position $ x_i $ is bounded.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed; localization; sensor network",
}
@Article{Xu:2010:SSP,
author = "Kuang Xu and Olivier Dousse and Patrick Thiran",
title = "Self-synchronizing properties of {CSMA} wireless
multi-hop networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "71--82",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811048",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We show that CSMA is able to spontaneously synchronize
transmissions in a wireless network with constant-size
packets, and that this property can be used to devise
efficient synchronized CSMA scheduling mechanisms
without message passing. Using tools from queuing
theory, we prove that for any connected wireless
network with arbitrary interference constraints, it is
possible to implement self-synchronizing TDMA schedules
without any explicit message passing or clock
synchronization besides transmitting the original data
packets, and the interaction can be fully local in that
each node decides when to transmit next only by
overhearing its neighbors' transmissions. We also
provide a necessary and sufficient condition on the
emergence of self-synchronization for a given TDMA
schedule, and prove that such conditions for
self-synchronization can be checked in a finite number
of steps for a finite network topology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "scheduling algorithm; self-synchronization; stochastic
recursive sequence",
}
@Article{Moallemi:2010:FLD,
author = "Ciamac Moallemi and Devavrat Shah",
title = "On the flow-level dynamics of a packet-switched
network",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "83--94",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811050",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The packet is the fundamental unit of transportation
in modern communication networks such as the Internet.
Physical layer scheduling decisions are made at the
level of packets, and packet-level models with
exogenous arrival processes have long been employed to
study network performance, as well as design scheduling
policies that more efficiently utilize network
resources. On the other hand, a user of the network is
more concerned with end-to-end bandwidth, which is
allocated through congestion control policies such as
TCP. Utility-based flow-level models have played an
important role in understanding congestion control
protocols. In summary, these two classes of models have
provided separate insights for flow-level and
packet-level dynamics of a network. In this paper, we
wish to study these two dynamics together. We propose a
joint flow-level and packet-level stochastic model for
the dynamics of a network, and an associated policy for
congestion control and packet scheduling that is based
on alpha-weighted policies from the literature. We
provide a fluid analysis for the model that establishes
the throughput optimality of the proposed policy, thus
validating prior insights based on separate
packet-level and flow-level models. By analyzing a
critically scaled fluid model under the proposed
policy, we provide constant factor performance bounds
on the delay performance and characterize the invariant
states of the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; flow-level model; maximum weight;
packet-level model; scheduling; utility maximization",
}
@Article{Godfrey:2010:ICD,
author = "P. Brighten Godfrey and Michael Schapira and Aviv
Zohar and Scott Shenker",
title = "Incentive compatibility and dynamics of congestion
control",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "95--106",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811051",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "his paper studies under what conditions congestion
control schemes can be both efficient, so that capacity
is not wasted, and incentive compatible, so that each
participant can maximize its utility by following the
prescribed protocol. We show that both conditions can
be achieved if routers run strict priority queueing
(SPQ) or weighted fair queueing (WFQ) and end-hosts run
any of a family of protocols which we call Probing
Increase Educated Decrease (PIED). A natural question
is whether incentive compatibility and efficiency are
possible while avoiding the per-flow processing of WFQ.
We partially address that question in the negative by
showing that any policy satisfying a certain 'locality'
condition cannot guarantee both properties.\par
Our results also have implications for convergence to
some steady-state throughput for the flows. Even when
senders transmit at a fixed rate (as in a UDP flow
which does not react to congestion), feedback effects
among the routers can result in complex dynamics which
do not appear in the simple topologies studied in past
work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; incentives; queueing; TCP",
}
@Article{Shah:2010:DCG,
author = "Devavrat Shah and Jinwoo Shin",
title = "Dynamics in congestion games",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "107--118",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811052",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Game theoretic modeling and equilibrium analysis of
congestion games have provided insights in the
performance of Internet congestion control, road
transportation networks, etc. Despite the long history,
very little is known about their transient
(non-equilibrium) performance. In this paper, we are
motivated to seek answers to questions such as: how long
does it take to reach equilibrium; whether the system
operates near equilibrium in the presence of dynamics
(e.g., nodes joining or leaving); and the tradeoff
between performance and the rate of dynamics. In this pursuit,
we provide three contributions in this paper. First, a
novel probabilistic model to capture realistic
behaviors of agents allowing for the possibility of
arbitrariness in conjunction with rationality. Second,
evaluation of (a) time to converge to equilibrium under
this behavior model and (b) distance to Nash
equilibrium. Finally, determination of tradeoff between
the rate of dynamics and quality of performance
(distance to equilibrium) which leads to an interesting
uncertainty principle. The novel technical ingredients
involve analysis of the logarithmic Sobolev constant of
a Markov process with a time-varying state space;
methodologically, this should be of broader interest in
the context of dynamical systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion game; logarithmic Sobolov constant;
logit-response",
}
@Article{Xiang:2010:ORS,
author = "Liping Xiang and Yinlong Xu and John C. S. Lui and
Qian Chang",
title = "Optimal recovery of single disk failure in {RDP} code
storage systems",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "119--130",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811054",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern storage systems use thousands of inexpensive
disks to meet the storage requirement of applications.
To enhance the data availability, some form of
redundancy is used. For example, conventional RAID-5
systems provide data availability for single disk
failure only, while recent advanced coding techniques
such as row-diagonal parity (RDP) can provide data
availability with up to two disk failures. To reduce
the probability of data unavailability, whenever a
single disk fails, disk recovery (or rebuild) will be
carried out. We show that the conventional recovery scheme
of RDP code for a single disk failure is inefficient
and suboptimal. In this paper, we propose an optimal
and efficient disk recovery scheme, Row-Diagonal
Optimal Recovery (RDOR), for single disk failure of RDP
code that has the following properties: (1) it is read
optimal in the sense that it issues the smallest number
of disk reads to recover the failed disk; (2) it has
the load balancing property that all surviving disks
will be subjected to the same amount of additional
workload in rebuilding the failed disk. We carefully
explore the design state space and theoretically show
the optimality of RDOR. We carry out performance
evaluation to quantify the merits of RDOR on some
widely used disks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk failure; raid recovery; RDP code; recovery
algorithm",
}
@Article{Ghanbari:2010:QLR,
author = "Saeed Ghanbari and Gokul Soundararajan and Cristiana
Amza",
title = "A query language and runtime tool for evaluating
behavior of multi-tier servers",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "131--142",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811055",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As modern multi-tier systems are becoming increasingly
large and complex, it becomes more difficult for system
analysts to understand the overall behavior of the
system, and diagnose performance problems. To assist
analysts inspect performance behavior, we introduce
SelfTalk, a novel declarative language that allows
analysts to query and understand the status of a large
scale system. SelfTalk is sufficiently expressive to
encode an analyst's high-level hypotheses about system
invariants, normal correlations between system metrics,
or other a priori derived performance models, such as
'I expect that the throughputs of interconnected system
components are linearly correlated'. Given a
hypothesis, Dena, our runtime support system,
instantiates and validates it using actual monitoring
data within specific system configurations. We evaluate
SelfTalk/Dena by posing several hypotheses about system
behavior and querying Dena to validate system behavior
in a multi-tier dynamic content server. We find that
Dena automatically validates the system performance
based on the pre-existing hypotheses and helps to
diagnose system misbehavior.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "expectation; hypothesis; management; performance
models",
}
@Article{Goel:2010:SSQ,
author = "Ashish Goel and Pankaj Gupta",
title = "Small subset queries and bloom filters using ternary
associative memories, with applications",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "143--154",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811056",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Associative memories offer high levels of parallelism
in matching a query against stored entries. We design
and analyze an architecture which uses a {\em single\/}
lookup into a Ternary Content Addressable Memory (TCAM)
to solve the subset query problem for small sets, i.e.,
to check whether a given set (the query) contains (or
alternately, is contained in) any one of a large
collection of sets in a database. We use each TCAM
entry as a small Ternary Bloom Filter (each 'bit' of
which is one of {0,1,wildcard}) to store one of the
sets in the collection. Like Bloom filters, our
architecture is susceptible to false positives. Since
each TCAM entry is quite small, asymptotic analyses of
Bloom filters do not directly apply. Surprisingly, we
are able to show that the asymptotic false positive
probability formula can be safely used if we penalize
the small Bloom filter by taking away just one bit of
storage and adding just half an extra set element
before applying the formula. We believe that this
analysis is independently interesting. The subset query
problem has applications in databases, network
intrusion detection, packet classification in Internet
routers, and Information Retrieval. We demonstrate our
architecture on one illustrative streaming application
-- intrusion detection in network traffic. By shingling
(i.e., taking consecutive bytes of) the strings in the
database, we can perform a single subset query and
hence a single TCAM search, to skip many bytes in the
stream. We evaluate our scheme on the open-source ClamAV
anti-virus database, for {\em worst-case\/} as well as
random streams. Our architecture appears to be at least
one order of magnitude faster than previous approaches.
Since the individual Bloom filters must fit in a single
TCAM entry (currently 72 to 576 bits), our solution
applies only when each set is of a small cardinality.
However, this is sufficient for many typical
applications. Also, recent algorithms for the
subset-query problem use a small-set version as a
subroutine",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bloom filters; subset queries; TCAM",
}
@Article{Laadan:2010:TLA,
author = "Oren Laadan and Nicolas Viennot and Jason Nieh",
title = "Transparent, lightweight application execution replay
on commodity multiprocessor operating systems",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "155--166",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present Scribe, the first system to provide
transparent, low-overhead application record-replay and
the ability to go live from replayed execution. Scribe
introduces new lightweight operating system mechanisms,
rendezvous and sync points, to efficiently record
nondeterministic interactions such as related system
calls, signals, and shared memory accesses. Rendezvous
points make a partial ordering of execution based on
system call dependencies sufficient for replay,
avoiding the recording overhead of maintaining an exact
execution ordering. Sync points convert asynchronous
interactions that can occur at arbitrary times into
synchronous events that are much easier to record and
replay.\par
We have implemented Scribe without changing, relinking,
or recompiling applications, libraries, or operating
system kernels, and without any specialized hardware
support such as hardware performance counters. It works
on commodity Linux operating systems, and commodity
multi-core and multiprocessor hardware. Our results
show for the first time that an operating system
mechanism can correctly and transparently record and
replay multi-process and multi-threaded applications on
commodity multiprocessors. Scribe recording overhead is
less than 2.5\% for server applications including
Apache and MySQL, and less than 15\% for desktop
applications including Firefox, Acrobat, OpenOffice,
parallel kernel compilation, and movie playback.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "debugging; fault-tolerance; record-replay;
virtualization",
}
@Article{Ni:2010:CSP,
author = "Jian Ni and R. Srikant and Xinzhou Wu",
title = "Coloring spatial point processes with applications to
peer discovery in large wireless networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "167--178",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811059",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study distributed channel assignment
in wireless networks with applications to peer
discovery in ad hoc wireless networks. We model channel
assignment as a coloring problem for spatial point
processes in which n nodes are located in a unit cube
uniformly at random and each node is assigned one of K
colors, where each color represents a channel. The
objective is to maximize the spatial separation between
nodes of the same color. In general, it is hard to
derive the optimal coloring algorithm and therefore, we
consider a natural greedy coloring algorithm, first
proposed in [5]. We prove two key results: (i) with
just a small number of colors when K is roughly of the
order of log(n) loglog(n), the distance separation
achieved by the greedy coloring algorithm
asymptotically matches the optimal distance separation
that can be achieved by an algorithm which is allowed
to select the locations of the nodes but is allowed to
use only one color, and (ii) when K = Omega(log(n)),
the greedy coloring algorithm asymptotically achieves
the best distance separation that can be achieved by an
algorithm which is allowed to both optimally color and
place nodes. The greedy coloring algorithm is also
shown to dramatically outperform a simple random
coloring algorithm. Moreover, the results continue to
hold under node mobilities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel assignment; coloring algorithms; spatial point
processes; wireless networks",
}
@Article{vandeVen:2010:OTB,
author = "Peter M. van de Ven and Augustus J. E. M. Janssen and
Johan S. H. van Leeuwaarden",
title = "Optimal tradeoff between exposed and hidden nodes in
large wireless networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "179--190",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811060",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wireless networks equipped with the CSMA protocol are
subject to collisions due to interference. For a given
interference range we investigate the tradeoff between
collisions (hidden nodes) and unused capacity (exposed
nodes). We show that the sensing range that maximizes
throughput critically depends on the activation rate of
nodes. For infinite line networks, we prove the
existence of a threshold: When the activation rate is
below this threshold the optimal sensing range is small
(to maximize spatial reuse). When the activation rate
is above the threshold the optimal sensing range is
just large enough to preclude all collisions.
Simulations suggest that this threshold policy extends
to more complex linear and non-linear topologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "carrier-sensing range; exposed nodes; hidden nodes;
Markov processes; multi-access; throughput; wireless
networks",
}
@Article{Liu:2010:SMW,
author = "Shihuan Liu and Lei Ying and R. Srikant",
title = "Scheduling in multichannel wireless networks with
flow-level dynamics",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "191--202",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811061",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies scheduling in multichannel wireless
networks with flow-level dynamics. We consider a
downlink network with a single base station, M channels
(frequency bands), and multiple mobile users (flows).
We also assume mobiles dynamically join the network to
receive finite-size files and leave after downloading
the complete files. A recent study [16] has shown that
the MaxWeight algorithm fails to be throughput-optimal
under such flow-level dynamics. The main contribution
of this paper is the development of joint
channel-assignment and workload-based scheduling
algorithms for multichannel downlink networks with
dynamic flow arrivals/departures. We prove that these
algorithms are throughput-optimal. Our simulations
further demonstrate that a hybrid channel-assignment
and workload-based scheduling algorithm significantly
improves the network performance (in terms of both
file-transfer delay and blocking probability) compared
to the existing algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "flow-level dynamics; multichannel downlink network;
wireless scheduling",
}
@Article{Shah:2010:DSC,
author = "Devavrat Shah and Tauhid Zaman",
title = "Detecting sources of computer viruses in networks:
theory and experiment",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "203--214",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811063",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We provide a systematic study of the problem of
finding the source of a computer virus in a network. We
model virus spreading in a network with a variant of
the popular SIR model and then construct an estimator
for the virus source. This estimator is based upon a
novel combinatorial quantity which we term rumor
centrality. We establish that this is an ML estimator
for a class of graphs. We find the following surprising
threshold phenomenon: on trees which grow faster than a
line, the estimator always has non-trivial detection
probability, whereas on trees that grow like a line,
the detection probability will go to 0 as the network
grows. Simulations performed on synthetic networks such
as the popular small-world and scale-free networks, and
on real networks such as an Internet AS network and the
U.S. electric power grid network, show that the
estimator either finds the source exactly or within a
few hops in different network topologies. We compare
rumor centrality to another common network centrality
notion known as distance centrality. We prove that on
trees, the rumor center and distance center are
equivalent, but on general networks, they may differ.
Indeed, simulations show that rumor centrality
outperforms distance centrality in finding virus
sources in networks which are not tree-like.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "epidemics; estimation",
}
@Article{Misra:2010:IPA,
author = "Vishal Misra and Stratis Ioannidis and Augustin
Chaintreau and Laurent Massouli{\'e}",
title = "Incentivizing peer-assisted services: a fluid
{Shapley} value approach",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "215--226",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811064",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new generation of content delivery networks for live
streaming, video on demand, and software updates takes
advantage of a peer-to-peer architecture to reduce
their operating cost. In contrast with previous
uncoordinated peer-to-peer schemes, users opt-in to
dedicate part of the resources they own to help the
content delivery, in exchange for receiving the same
service at a reduced price. Such incentive mechanisms
are appealing, as they simplify coordination and
accounting. However, they also increase a user's
expectation that she will receive a fair price for the
resources she provides. Addressing this issue carefully
is critical in ensuring that all interested
parties--including the provider--are willing to
participate in such a system, thereby guaranteeing its
stability.\par
In this paper, we take a cooperative game theory
approach to identify the ideal incentive structure that
follows the axioms formulated by Lloyd Shapley. This
ensures that each player, be it the provider or a peer,
receives an amount proportional to its contribution and
bargaining power when entering the game. In general,
the drawback of this ideal incentive structure is its
computational complexity. However, we prove that as the
number of peers receiving the service becomes large,
the Shapley value received by each player approaches a
fluid limit. This limit follows a simple closed form
expression and can be computed in several scenarios of
interest: by applying our technique, we show that
several peer-assisted services, deployed on both wired
and wireless networks, can benefit from important cost
and energy savings with a proper incentive structure
that follows simple compensation rules.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cooperative game theory; incentive mechanisms",
}
@Article{Ma:2010:LPM,
author = "Yadi Ma and Suman Banerjee and Shan Lu and Cristian
Estan",
title = "Leveraging parallelism for multi-dimensional packet
classification on software routers",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "227--238",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811065",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a software-based solution to the
multi-dimensional packet classification problem which
can operate at high line speeds, e.g., in excess of 10
Gbps, using high-end multi-core desktop platforms
available today. Our solution, called Storm, leverages
a common notion that a subset of rules are likely to be
popular over short durations of time. By identifying a
suitable set of popular rules one can significantly
speed up existing software-based classification
algorithms. A key aspect of our design is in
partitioning processor resources into various relevant
tasks, such as continuously computing the popular rules
based on a sampled subset of traffic, fast
classification for traffic that matches popular rules,
dealing with packets that do not match the most popular
rules, and traffic sampling. Our results show that by
using a single 8-core Xeon processor desktop platform,
it is possible to sustain classification rates of more
than 15 Gbps for representative 5-dimensional rule sets
with more than 9000 rules, with no packet
losses. This performance is significantly superior to an
8-way implementation of a state-of-the-art packet
classification software system running on the same
8-core machine. Therefore, we believe that our design
of packet classification functions can be a useful
classification building block for RouteBricks-style
designs, where a core router might be constructed as a
mesh of regular desktop machines.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet classification; parallelism; storm",
}
@Article{Shah:2010:QPW,
author = "Devavrat Shah and John N. Tsitsiklis and Yuan Zhong",
title = "Qualitative properties of $ \alpha $-weighted
scheduling policies",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "239--250",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811067",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a switched network, a fairly general
constrained queueing network model that has been used
successfully to model the detailed packet-level
dynamics in communication networks, such as
input-queued switches and wireless networks. The main
operational issue in this model is that of deciding
which queues to serve, subject to certain
constraints.\par
In this paper, we study qualitative performance
properties of the well known $ \alpha $-weighted
scheduling policies. The stability, in the sense of
positive recurrence, of these policies has been well
understood. We establish exponential upper bounds on
the tail of the steady-state distribution of the
backlog.\par
Along the way, we prove finiteness of the expected
steady-state backlog when $ \alpha < 1$, a property
that was known only for $ \alpha \geq 1$.\par
Finally, we analyze the excursions of the maximum
backlog over a finite time horizon for $ \alpha \geq 1 $.
As a consequence, for $ \alpha \geq 1 $,
we establish the full state space collapse property.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "exponential bound; Markov chain; maximum weight-alpha;
state space collapse; switched network",
}
@Article{Casale:2010:CMD,
author = "Giuliano Casale and Ningfang Mi and Evgenia Smirni",
title = "{CWS}: a model-driven scheduling policy for correlated
workloads",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "251--262",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811068",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We define CWS, a non-preemptive scheduling policy for
workloads with correlated job sizes. CWS tackles the
scheduling problem by inferring the expected sizes of
upcoming jobs based on the structure of correlations
and on the outcome of past scheduling decisions. Size
prediction is achieved using a class of Hidden Markov
Models (HMM) with continuous observation densities that
describe job sizes. We show how the forward-backward
algorithm of HMMs applies effectively in scheduling
applications and how it can be used to derive
closed-form expressions for size prediction. This is
particularly simple to implement in the case of
observation densities that are phase-type (PH-type)
distributed, where existing fitting methods for
Markovian point processes may also simplify the
parameterization of the HMM workload model.\par
Based on the job size predictions, CWS emulates
size-based policies which favor short jobs, with
accuracy depending mainly on the HMM used to
parametrize the scheduling algorithm. Extensive
simulation and analysis illustrate that CWS is
competitive with policies that assume exact information
about the workload.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "correlated workload; model-driven scheduling; response
time; stochastic scheduling",
}
@Article{Zheng:2010:RAU,
author = "Haoqiang Zheng and Jason Nieh",
title = "{RSIO}: automatic user interaction detection and
scheduling",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "263--274",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811069",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present RSIO, a processor scheduling framework for
improving the response time of latency-sensitive
applications by monitoring accesses to I/O channels and
inferring when user interactions occur. RSIO
automatically identifies processes involved in a user
interaction and boosts their priorities at the time the
interaction occurs to improve system response time.
RSIO also detects processes indirectly involved in
processing an interaction, automatically accounting for
dependencies and boosting their priorities accordingly.
RSIO works with existing schedulers and requires no
application modifications to identify periods of
latency-sensitive application activity. We have
implemented RSIO in Linux and measured its
effectiveness on microbenchmarks and real applications.
Our results show that RSIO is easy to use and can
provide substantial improvements in system performance
for latency-sensitive applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dependencies; interactive applications; scheduling",
}
@Article{Bramson:2010:RLB,
author = "Maury Bramson and Yi Lu and Balaji Prabhakar",
title = "Randomized load balancing with general service time
distributions",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "275--286",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811071",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Randomized load balancing greatly improves the sharing
of resources in a number of applications while being
simple to implement. One model that has been
extensively used to study randomized load balancing
schemes is the supermarket model. In this model, jobs
arrive according to a rate-$ n \lambda $ Poisson process at
a bank of n rate-1 exponential server queues. A notable
result, due to Vvedenskaya {\em et al.\/} (1996),
showed that when each arriving job is assigned to the
shortest of d $ \geq $ 2 randomly chosen queues, the
equilibrium queue sizes decay doubly exponentially in
the limit as $ n \to \infty $. This is a substantial
improvement over the case d=1, where queue sizes decay
exponentially.\par
The method of analysis used in the above paper and in
the subsequent literature applies to jobs with
exponential service time distributions and does not
easily generalize. It is desirable to study load
balancing models with more general, especially
heavy-tailed, service time distributions since such
service times occur widely in practice.\par
This paper describes a modularized program for treating
randomized load balancing problems with general service
time distributions and service disciplines. The program
relies on an {\em ansatz\/} which asserts that any
finite set of queues in a randomized load balancing
scheme becomes independent as $ n \to \infty $. This
allows one to derive queue size distributions and other
performance measures of interest. We establish the {\em
ansatz\/} when the service discipline is FIFO and the
service time distribution has a decreasing hazard rate
(this includes heavy-tailed service times). Assuming
the {\em ansatz}, we also obtain the following results:
(i) as $ n \to \infty $, the process of job arrivals at
any fixed queue tends to a Poisson process whose rate
depends on the size of the queue, (ii) when the service
discipline at each server is processor sharing or LIFO
with preemptive resume, the distribution of the number
of jobs is insensitive to the service distribution, and
(iii) the tail behavior of the queue-size distribution
in terms of the service distribution for the FIFO
service discipline.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic independence; load balancing; randomized
algorithms",
}
@Article{Ganesh:2010:LBR,
author = "Ayalvadi Ganesh and Sarah Lilienthal and D. Manjunath
and Alexandre Proutiere and Florian Simatos",
title = "Load balancing via random local search in closed and
open systems",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "287--298",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811072",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we analyze the performance of random
{\em load resampling and migration\/} strategies in
parallel server systems. Clients initially attach to an
arbitrary server, but may switch servers independently
at random instants of time in an attempt to improve
their service rate. This approach to load balancing
contrasts with traditional approaches where clients
make smart server selections upon arrival (e.g.,
Join-the-Shortest-Queue policy and variants thereof).
Load resampling is particularly relevant in scenarios
where clients cannot predict the load of a server
before being actually attached to it. An important
example is in wireless spectrum sharing where clients
try to share a set of frequency bands in a distributed
manner.\par
We first analyze the natural {\em Random Local Search
(RLS)\/} strategy. Under this strategy, after sampling
a new server randomly, clients only switch to it if
their service rate is improved. In closed systems,
where the client population is fixed, we derive tight
estimates of the time it takes under RLS strategy to
balance the load across servers. We then study open
systems where clients arrive according to a random
process and leave the system upon service completion.
In this scenario, we analyze how client migrations
within the system interact with the system dynamics
induced by client arrivals and departures. We compare
the load-aware RLS strategy to a load-oblivious
strategy in which clients just randomly switch server
without accounting for the server loads. Surprisingly,
we show that both load-oblivious and load-aware
strategies stabilize the system whenever this is at all
possible. We further demonstrate, using large-system
asymptotics, that the average client sojourn time under
the load-oblivious strategy is not considerably reduced
when clients apply smarter load-aware strategies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mean field asymptotics; stability analysis",
}
@Article{Zhao:2010:UMF,
author = "Haiquan (Chuck) Zhao and Cathy H. Xia and Zhen Liu and
Don Towsley",
title = "A unified modeling framework for distributed resource
allocation of general fork and join processing
networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "299--310",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811073",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses the problem of distributed
resource allocation in general fork and join processing
networks. The problem is motivated by the complicated
processing requirements arising from distributed data
intensive computing. In such applications, the
underlying data processing software consists of a rich
set of semantics that include synchronous and
asynchronous data fork and data join. The different
types of semantics and processing requirements
introduce complex interdependence between various data
flows within the network.\par
We study the distributed resource allocation problem in
such systems with the goal of achieving the maximum
total utility of output streams. Past research has
dealt with networks with specific types of fork/join
semantics, but none of them included all four types. We
propose a novel modeling framework that can represent
all combinations of fork and join semantics, and
formulate the resource allocation problem as a convex
optimization problem on this model. We propose a
shadow-queue based decentralized iterative algorithm to
solve the resource allocation problem. We show that the
algorithm guarantees optimality and demonstrate through
simulation that it can adapt quickly to dynamically
changing environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed algorithm; fork and join networks;
resource allocation",
}
@Article{Ioannidis:2010:DCH,
author = "Stratis Ioannidis and Laurent Massoulie and Augustin
Chaintreau",
title = "Distributed caching over heterogeneous mobile
networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "311--322",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811075",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sharing content over a mobile network through
opportunistic contacts has recently received
considerable attention.\par
In proposed scenarios, users store content they
download in a local cache and share it with other users
they meet, e.g., via Bluetooth or WiFi. The storage
capacity of mobile devices is typically limited;
therefore, identifying which content a user should
store in her cache is a fundamental problem in the
operation of any such content distribution
system.\par
In this work, we propose Psephos, a novel mechanism for
determining the caching policy of each mobile user.
Psephos is fully distributed: users compute their own
policies individually, in the absence of a central
authority. Moreover, it is designed for a heterogeneous
environment, in which demand for content, access to
resources, and mobility characteristics may vary across
different users. Most importantly, the caching policies
computed by our mechanism are optimal: we rigorously
show that Psephos maximizes the system's social
welfare. Our results are derived formally using
techniques from stochastic approximation and convex
optimization; to the best of our knowledge, our work is
the first to address caching with heterogeneity in a
fully distributed manner.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "caching; content distribution; heterogeneity;
opportunistic networks",
}
@Article{Antunes:2010:AFI,
author = "Nelson Antunes and Gon{\c{c}}alo Jacinto and
Ant{\'o}nio Pacheco",
title = "An analytical framework to infer multihop path
reliability in {MANETs}",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "323--332",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811076",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to complexity and intractability reasons, most of
the analytical studies on the reliability of
communication paths in mobile ad hoc networks are based
on the assumption of link independence. In this paper,
an analytical framework is developed to characterize
the random behavior of a multihop path and derive path
metrics to characterize the reliability of paths. This
is achieved through the modeling of a multihop path as
a PDMP (piecewise deterministic Markov process). Two
path-based metrics are obtained as expectations of
functionals of the process: the mean path duration and
the path persistence. We show that these metrics are
the unique solution of a set of integro-differential
equations and provide a recursive scheme for their
computation. Finally, numerical results illustrate the
computation of the metrics; these results are compared
with independent link approximation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mobile ad hoc networks; mobility; multihop path
reliability; piecewise deterministic Markov processes;
random walk",
}
@Article{Coffman:2010:CFD,
author = "Ed Coffman and Philippe Robert and Florian Simatos and
Shuzo Tarumi and Gil Zussman",
title = "Channel fragmentation in dynamic spectrum access
systems: a theoretical study",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "333--344",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811077",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic Spectrum Access systems exploit temporarily
available spectrum ('white spaces') and can spread
transmissions over a number of non-contiguous
sub-channels. Such methods are highly beneficial in
terms of spectrum utilization. However, excessive
fragmentation degrades performance and hence offsets
the benefits. Thus, there is a need to study these
processes so as to determine how to ensure acceptable
levels of fragmentation. Hence, we present experimental
and analytical results derived from a mathematical
model. We model a system operating at capacity serving
requests for bandwidth by assigning a collection of
gaps (sub-channels) with no limitations on the fragment
size. Our main theoretical result shows that even if
fragments can be arbitrarily small, the system does not
degrade with time. Namely, the average total number of
fragments remains bounded. Within the very difficult
class of dynamic fragmentation models (including models
of storage fragmentation), this result appears to be
the first of its kind. Extensive experimental results
describe behavior, at times unexpected, of
fragmentation under different algorithms. Our model
also applies to dynamic linked-list storage allocation,
and provides a novel analysis in that domain. We prove
that, interestingly, the 50\% rule of the classical
(non-fragmented) allocation model carries over to our
model. Overall, the paper provides insights into the
potential behavior of practical fragmentation
algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cognitive radio; dynamic spectrum access; ergodicity
of Markov chains; fragmentation",
}
@Article{Bermond:2010:DSA,
author = "Jean-Claude Bermond and Dorian Mazauric and Vishal
Misra and Philippe Nain",
title = "A distributed scheduling algorithm for wireless
networks with constant overhead and arbitrary binary
interference",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "345--346",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811079",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed algorithm; interference; stability;
transmission scheduling; wireless network",
}
@Article{Sagnol:2010:SOD,
author = "Guillaume Sagnol and Mustapha Bouhtou and St{\'e}phane
Gaubert",
title = "Successive $c$-optimal designs: a scalable technique
to optimize the measurements on large networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "347--348",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811080",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new approach to optimize the deployment
and the sampling rates of network monitoring tools,
such as Netflow, on a large IP network. It reduces to
solving a stochastic sequence of Second Order Cone
Programs. We validate our approach with experiments
relying on real data from a commercial network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "c-optimality; netflow; optimal experimental design;
SOCP",
}
@Article{Cuevas:2010:DDB,
author = "Rub{\'e}n Cuevas and Nikolaos Laoutaris and Xiaoyuan
Yang and Georgos Siganos and Pablo Rodriguez",
title = "Deep diving into {BitTorrent} locality",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "349--350",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811081",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A substantial amount of work has recently gone into
localizing BitTorrent traffic within an ISP in order to
avoid excessive and often times unnecessary transit
costs. In this work we aim to answer yet unanswered
questions such as: what is the minimum and the maximum
transit traffic reduction across hundreds of ISPs?,
what are the win-win boundaries for ISPs and their
users?, what is the maximum amount of transit traffic
that can be localized without requiring fine-grained
control of inter-AS overlay connections?, what is the
impact to transit traffic from upgrades of residential
broadband speeds?.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BitTorrent; locality; measurements",
}
@Article{Jin:2010:IAN,
author = "Yu Jin and Nick Duffield and Patrick Haffner and
Subhabrata Sen and Zhi-Li Zhang",
title = "Inferring applications at the network layer using
collective traffic statistics",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "351--352",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811082",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we propose a novel technique for
inferring the distribution of application classes
present in the aggregated traffic flows between
endpoints, which exploits both the statistics of the
traffic flows, and the spatial distribution of those
flows across the network. Our method employs a two-step
supervised model, where the bootstrapping step provides
initial (inaccurate) inference on the traffic
application classes, and the graph-based calibration
step adjusts the initial inference through the
collective spatial traffic distribution. In evaluations
using real traffic flow measurements from a large ISP,
we show how our method can accurately classify
application types within aggregate traffic between
endpoints, even without the knowledge of ports and
other traffic features. While the bootstrap estimate
classifies the aggregates with 80\% accuracy,
incorporating spatial distributions through calibration
increases the accuracy to 92\%, i.e., roughly halving
the number of errors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "application identification; collective traffic
statistics; graph-based calibration; two-step model",
}
@Article{Anselmi:2010:PAP,
author = "Jonatha Anselmi and Bruno Gaujal",
title = "The price of anarchy in parallel queues revisited",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "353--354",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811083",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a network of parallel, non-observable
queues and analyze the Price of Anarchy (PoA) from the
new point of view where the router has the memory of
previous dispatching choices. In the regime where the
demands grow with the network size, we provide an upper
bound on the PoA by means of convex programming. To
study the impact of non-Bernoulli routers, we introduce
the Price of Forgetting (PoF) and prove that it is
bounded from above by two.\par
Numerical experiments show that the benefit of having
memory in the router is independent of the network size
and heterogeneity, and monotonically depends on the
network load only.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "convex programming; parallel queues; price of anarchy;
price of forgetting",
}
@Article{Khouzani:2010:OPS,
author = "M. H. R. Khouzani and Saswati Sarkar and Eitan
Altman",
title = "Optimal propagation of security patches in mobile
wireless networks: extended abstract",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "355--356",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811084",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Reliable security measures against outbreaks of
malware is imperative to enable large scale
proliferation of wireless technologies. Immunization
and healing of the nodes through dissemination of
security patches can counter the spread of a malware
upon an epidemic outbreak. The distribution of patches
however burdens the bandwidth which is scarce in
wireless networks. The trade-offs between security
risks and resource consumption can be attained by
activating at any given time only fractions of
dispatchers and dynamically selecting their packet
transmission rates. We formulate the above trade-offs
as an optimal control problem that seek to minimize the
aggregate network costs that depend on security risks
and resource consumed by the countermeasures. Using
Pontryagin's maximum principle, we prove that the
dynamic control strategies have simple structures. When
the resource consumption cost is concave, optimal
strategy is to use maximum resources for distribution
of patches until a threshold time, upon which, the
patching should halt. When the resource consumption
cost is convex, the above transition is strict but
continuous.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dynamic patching; optimal control;
security-performance trade-off",
}
@Article{Le:2010:MCE,
author = "Kien Le and Ozlem Bilgir and Ricardo Bianchini and
Margaret Martonosi and Thu D. Nguyen",
title = "Managing the cost, energy consumption, and carbon
footprint of {Internet} services",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "357--358",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811085",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The large amount of energy consumed by Internet
services represents significant and fast-growing
financial and environmental costs. This paper
introduces a general, optimization-based framework and
several request distribution policies that enable
multi-data-center services to manage their brown energy
consumption and leverage green energy, while respecting
their service-level agreements (SLAs) and minimizing
energy cost. Our policies can be used to abide by caps
on brown energy consumption that might arise from
various scenarios such as government imposed
Kyoto-style carbon limits. Extensive simulations and
real experiments show that our policies allow a service
to trade off consumption and cost. For example, using
our policies, a service can reduce brown energy
consumption by 24\% for only a 10\% increase in cost,
while still abiding by SLAs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data center; energy cap; optimization; renewable
energy; request distribution",
}
@Article{Mishra:2010:CPM,
author = "Asit K. Mishra and Shekhar Srikantaiah and Mahmut
Kandemir and Chita R. Das",
title = "Coordinated power management of voltage islands in
{CMPs}",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "359--360",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811086",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multiple clock domain architectures have recently been
proposed to alleviate the power problem in CMPs by
having different frequency/voltage values assigned to
each domain based on workload requirements. However,
accurate allocation of power to these voltage/frequency
islands based on time varying workload characteristics
as well as controlling the power consumption at the
provisioned power level is non-trivial. Toward this
end, we propose a two-tier feedback-based
control-theoretic solution. Our first tier consists of a global
power manager that allocates power targets to
individual islands based on the workload dynamics. The
power consumptions of these islands are in turn
controlled by a second tier, consisting of local
controllers that regulate island power using dynamic
voltage and frequency scaling in response to workload
requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "chip multiprocessors (CMP); control theory; DVFs;
GALs",
}
@Article{Nguyen:2010:RSA,
author = "Hung X. Nguyen and Matthew Roughan",
title = "Rigorous statistical analysis of {Internet} loss
measurements",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "361--362",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811087",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a rigorous technique for
estimating confidence intervals of packet loss
measurements. Our approach is motivated by the simple
observation that the loss process can be modelled as
an alternating renewal process. We use this structure
to build a Hidden Semi-Markov Model (HSMM) for the
measurement process, and from this estimate both loss
rates and their confidence intervals. We use both
simulations and a set of more than 18000 hours of real
Internet measurements (between dedicated measurement
hosts, PlanetLab hosts, web and DNS servers) to
cross-validate our estimates, and show that they are
significantly more accurate than any current
alternative.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "accuracy; loss rate; performance measurement",
}
@Article{Osogami:2010:SOT,
author = "Takayuki Osogami and Rudy Raymond",
title = "Semidefinite optimization for transient analysis of
queues",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "363--364",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We derive an upper bound on the tail distribution of
the transient waiting time for the GI/GI/1 queue from a
formulation of semidefinite programming (SDP). Our
upper bounds are expressed in closed forms using the
first two moments of the service time and the
interarrival time. The upper bounds on the tail
distributions are integrated to obtain the upper bounds
on the corresponding expectations. We also extend the
formulation of the SDP, using the higher moments of the
service time and the interarrival time, and calculate
upper bounds and lower bounds numerically.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bounds; duality; g/g/1 queue; moments; occupation
measure; semidefinite programming; transient",
}
@Article{Park:2010:CCF,
author = "Dongchul Park and Biplob Debnath and David Du",
title = "{CFTL}: a convertible flash translation layer adaptive
to data access patterns",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "365--366",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811089",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The flash translation layer (FTL) is a
software/hardware interface inside NAND flash memory.
Since FTL has a critical impact on the performance of
NAND flash-based devices, a variety of FTL schemes have
been proposed to improve their performance. In this
paper, we propose a novel hybrid FTL scheme named
Convertible Flash Translation Layer (CFTL). Unlike
other existing FTLs using static address mapping
schemes, CFTL is adaptive to data access patterns so
that it can dynamically switch its mapping scheme to
either a read-optimized or a write-optimized mapping
scheme. In addition to this convertible scheme, we
propose an efficient caching strategy to further
improve the CFTL performance with only a simple hint.
Consequently, both the convertible feature and the
caching strategy empower CFTL to achieve good read
performance as well as good write performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CFTL; flash memory; flash translation layer; FTL",
}
@Article{Qian:2010:CUL,
author = "Feng Qian and Abhinav Pathak and Yu Charlie Hu and
Zhuoqing Morley Mao and Yinglian Xie",
title = "A case for unsupervised-learning-based spam
filtering",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "367--368",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811090",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "latent semantics analysis (LSA); spam campaign;
spamcampaignassassin (SCA); unsupervised learning",
}
@Article{Rajagopalan:2010:DAD,
author = "Shreevatsa Rajagopalan and Devavrat Shah",
title = "Distributed averaging in dynamic networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "369--370",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811091",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed averaging is a well-studied problem, and
often a 'prototype' for a class of fundamental
questions arising in various disciplines. Previous work
has considered the effect of dynamics in the network
topology, in terms of changes in which communication
links are present. Here, we analyze the other forms of
dynamics, namely: changes in the values at the nodes,
and nodes joining or leaving the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed averaging; distributed networks; dynamics;
message-passing",
}
@Article{Sarikaya:2010:PBP,
author = "Ruhi Sarikaya and Canturk Isci and Alper
Buyuktosunoglu",
title = "Program behavior prediction using a statistical metric
model",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "371--372",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811092",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Adaptive computing systems rely on predictions of
program behavior to understand and respond to the
dynamically varying application characteristics. This
study describes an accurate statistical workload metric
modeling scheme for predicting program phases. Our
evaluations demonstrate the superior performance of
this predictor over existing predictors on a wide range
of benchmarks. This prediction accuracy lends itself to
improved power-performance trade-offs when applied to
dynamic power management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer architecture; monitoring and forecasting;
system performance measurement; workload
characterization",
}
@Article{Shah:2010:DOQ,
author = "Devavrat Shah and Jinwoo Shin",
title = "Delay optimal queue-based {CSMA}",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "373--374",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811093",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the past year or so, an exciting progress has led
to throughput optimal design of CSMA-based algorithms
for wireless networks. However, such an algorithm
suffers from very poor delay performance. A recent work
suggests that it is impossible to design a CSMA-like
simple algorithm that is throughput optimal and induces
low delay for any wireless network. However, wireless
networks arising in practice are formed by nodes
placed, possibly arbitrarily, in some geographic
area.\par
In this paper, we propose a CSMA algorithm with
per-node average-delay bounded by a constant,
independent of the network size, when the network has
geometry (precisely, polynomial growth structure) that
is present in {\em any\/} practical wireless network.
Two novel features of our algorithm, crucial for its
performance, are (a) choice of access probabilities as
an appropriate function of queue-sizes, and (b) use of
local network topological structures. Essentially, our
algorithm is a queue-based CSMA with a minor difference
that at each time instance a very small fraction of
{\em frozen\/} nodes do not execute CSMA. Somewhat
surprisingly, appropriate selection of such frozen
nodes, in a distributed manner, leads to the
delay-optimal performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Aloha; Markov chain; mixing time; wireless
multi-access",
}
@Article{Shye:2010:CMU,
author = "Alex Shye and Benjamin Scholbrock and Gokhan Memik and
Peter A. Dinda",
title = "Characterizing and modeling user activity on
smartphones: summary",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "375--376",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811094",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present a comprehensive analysis of
real smartphone usage during a 6-month study of real
user activity on the Android G1 smartphone. Our goal is
to study the high-level characteristics of smartphone
usage, and to understand the implications on optimizing
smartphones, and their networks. Overall, we present 11
findings that cover general usage behavior, interaction
with the battery, power consumption, network activity,
frequently-run applications, and modeling usage
states.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "embedded systems; human factors",
}
@Article{Silveira:2010:DTA,
author = "Fernando Silveira and Christophe Diot and Nina Taft
and Ramesh Govindan",
title = "Detecting traffic anomalies using an equilibrium
property",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "377--378",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811095",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When many flows are multiplexed on a non-saturated
link, their volume changes over short timescales tend
to cancel each other out, making the average change
across flows close to zero. This equilibrium property
holds if the flows are nearly independent, and it is
violated by traffic changes caused by several
correlated flows. We exploit this empirical property to
design a computationally simple anomaly detection
method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "anomaly detection; statistical test",
}
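The equilibrium property described in the abstract of Silveira:2010:DTA can be
summarized as follows, in notation assumed for this note rather than taken from
the paper: if \(\Delta x_1, \ldots, \Delta x_N\) denote the short-timescale
volume changes of the N flows multiplexed on a non-saturated link, then
near-independence of the flows makes
\[
  \frac{1}{N} \sum_{i=1}^{N} \Delta x_i \approx 0,
\]
while a marked deviation of this average from zero indicates correlated flow
changes, which is what the detection method flags as an anomaly.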
@Article{Soundararajan:2010:CSE,
author = "Niranjan Soundararajan and Anand Sivasubramaniam and
Vijay Narayanan",
title = "Characterizing the soft error vulnerability of
multicores running multithreaded applications",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "379--380",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811096",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multicores have become the platform of choice across
all market segments. Cost-effective protection against
soft errors is important in these environments, due to
the need to move to lower technology generations and
the exploding number of transistors on a chip. While
multicores offer the flexibility of varying the number
of application threads and the number of cores on which
they run, the reliability impact of choosing one
configuration over another is unclear. Our study
reveals that the reliability costs vary dramatically
between configurations, and that being unaware of this
could lead to a sub-optimal choice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fit rate; multicore; soft errors",
}
@Article{Tan:2010:CMM,
author = "Jian Tan and Wei Wei and Bo Jiang and Ness Shroff and
Don Towsley",
title = "Can multipath mitigate power law delays?: effects of
parallelism on tail performance",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "381--382",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811097",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multipath; parallelism; power law; redundant
transmission; split transmission",
}
@Article{Tomozei:2010:DUP,
author = "Dan-Cristian Tomozei and Laurent Massouli{\'e}",
title = "Distributed user profiling via spectral methods",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "383--384",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811098",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "User profiling is a useful primitive for constructing
personalized services, such as content recommendation.
In the present work we investigate the feasibility of
user profiling in a distributed setting, with no
central authority and only local information exchanges
between users. Our main contributions are: (i) We
propose a spectral clustering technique, and prove its
ability to recover unknown user profiles with only a few
measures of affinity between users. (ii) We develop
distributed algorithms which achieve an embedding of
users into a low-dimensional space, based on spectral
transformation. These involve simple message passing
among users, and provably converge to the desired
embedding.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clustering; distributed spectral embedding; gossip",
}
@Article{George:2010:AAC,
author = "David K. George and Cathy H. Xia",
title = "Asymptotic analysis of closed queueing networks and
its implications to achievable service levels",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "3--5",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870180",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buaic:2010:SBM,
author = "Ana Buaic and Varun Gupta and Jean Mairesse",
title = "Stability of the bipartite matching model",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "6--8",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870181",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tizghadam:2010:RWD,
author = "Ali Tizghadam and Alberto Leon-Garcia",
title = "On random walks in direction-aware network problems",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "9--11",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870182",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2010:ART,
author = "Minghong Lin and Adam Wierman and Bert Zwart",
title = "The average response time in a heavy-traffic {SRPT}
queue",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "12--14",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870183",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sigman:2010:HTL,
author = "Karl Sigman and Ward Whitt",
title = "Heavy-traffic limits for nearly deterministic queues",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "15--17",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870184",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ye:2010:DLT,
author = "Heng-Qing Ye and David D. Yao",
title = "Diffusion limit of a two-class network: stationary
distributions and interchange of limits",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "18--20",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870185",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nino-Mora:2010:IPA,
author = "Jos{\'e} Ni{\~n}o-Mora",
title = "Index policies for admission and routing of soft
real-time traffic to parallel queues",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "21--23",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870186",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Robert:2010:MFA,
author = "Philippe Robert and Jim Roberts",
title = "A mean field approximation for the capacity of
server-limited, gate-limited multi-server polling
systems",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "24--26",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870187",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2010:FAL,
author = "Yunan Liu and Ward Whitt",
title = "A fluid approximation for large-scale service
systems",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "27--29",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870188",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2010:MFL,
author = "Nicolas Gast and Bruno Gaujal",
title = "Mean field limit of non-smooth systems and
differential inclusions",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "30--32",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870189",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Radovanovic:2010:RMT,
author = "Ana Radovanovi{\'c} and Assaf Zeevi",
title = "Revenue maximization through ``smart'' inventory
management in reservation-based online advertising",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "33--35",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870190",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cho:2010:VFP,
author = "Jeong-woo Cho and Jean-Yves {Le Boudec} and Yuming
Jiang",
title = "On the validity of the fixed point equation and
decoupling assumption for analyzing the {802.11 MAC}
protocol",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "36--38",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870191",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance evaluation of the 802.11 MAC protocol is
classically based on the decoupling assumption, which
hypothesizes that the backoff processes at different
nodes are independent. A necessary condition for the
validity of this approach is the existence and
uniqueness of a solution to a fixed point equation.
However, it was also recently pointed out that this
condition is not sufficient; in contrast, a necessary
and sufficient condition is a global stability property
of the associated ordinary differential equation. Such
a property was established only for a specific case,
namely for a homogeneous system (all nodes have the
same parameters) and when the number of backoff stages
is either 1 or infinite and with other restrictive
conditions. In this paper, we give a simple condition
that establishes the validity of the decoupling
assumption for the homogeneous case. We also discuss
the heterogeneous and the differentiated service cases
and show that the uniqueness condition is not
sufficient; we exhibit one case where the fixed point
equation has a unique solution but the decoupling
assumption is not valid.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vandeVen:2010:ETR,
author = "P. M. van de Ven and S. C. Borst and D. Denteneer and
A. J. E. M. Janssen and J. S. H. van Leeuwaarden",
title = "Equalizing throughputs in random-access networks",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "39--41",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870192",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marot:2010:RCP,
author = "Michel Marot and Vincent Gauthier",
title = "Reducing collision probability on a shared medium
using a variational method",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "42--44",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870193",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2010:AMM,
author = "Yingdong Lu and Mark S. Squillante",
title = "On approximations for multiple multidimensional
stochastic knapsacks",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "45--47",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870194",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gandhi:2010:DRM,
author = "Anshul Gandhi and Mor Harchol-Balter and Ivo Adan",
title = "Decomposition results for an {M/M/k} with staggered
setup",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "48--50",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870195",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider an M/M/k queueing system
with setup costs. Servers are turned off when there is
no work to do, but turning on an off server incurs a
setup cost. The setup cost takes the form of a time
delay and a power penalty. Setup costs are common in
manufacturing systems, data centers and disk farms,
where idle servers are turned off to save on operating
costs. Since servers in setup mode consume a lot of
power, the number of servers that can be in setup at
any time is often limited. In the staggered setup
model, at most one server can be in setup at any time.
While recent literature has analyzed an M/M/k system
with staggered setup and exponentially distributed
setup times, no closed-form solutions were obtained. We
provide the first analytical closed-form expressions
for the limiting distribution of the system states, the
distribution of response times, and the mean power
consumption for the above system. In particular, we
prove the following decomposition property: the
response time for an M/M/k system with staggered setup
is equal, in distribution, to the sum of the response
time for an M/M/k system without setup and the setup
time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
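A compact restatement of the decomposition property quoted in the abstract of
Gandhi:2010:DRM above, in notation chosen for this note rather than taken from
the paper (T for response times, S for the exponentially distributed setup
time):
\[
  T_{\mathrm{M/M/}k,\ \mathrm{staggered\ setup}}
  \;\stackrel{d}{=}\;
  T_{\mathrm{M/M/}k} + S .
\]
That is, in distribution, the staggered-setup system behaves like the plain
M/M/k system plus one setup time.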
@Article{Pal:2010:EIS,
author = "Ranjan Pal and Leana Golubchik",
title = "On the economics of information security: the problem
of designing optimal cyber-insurance contracts",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "51--53",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870196",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dube:2010:RDC,
author = "Parijat Dube and Li Zhang",
title = "Resiliency of distributed clock synchronization
networks",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "54--56",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870197",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Clock synchronization refers to techniques and
protocols used to maintain mutually consistent
time-of-day clocks in a coordinated network of
computers. A (clock) synchronization network is an
interconnection of computers to implement a particular
clock synchronization solution. To prevent
clock-dependency loops, most synchronization networks
use a stratified approach which is essentially a tree
structure with a Primary Reference Clock (at
``stratum-0''). A node at stratum-$ i + 1 $ exchanges
synchronization messages with its parent node at
stratum-$i$ and also with some other nodes at the same
or other levels. The purpose of this redundancy is
twofold: (i) to calculate smoother steering rate
adjustment, (ii) to maintain connectivity in the event
of a failure. We provide an analytical framework to
evaluate the performance of different approaches for
resilient synchronization networks. To evaluate
resiliency of synchronization networks, we characterize
failure recovery metrics like connectivity and failure
detection delay in terms of parameters related to
network topology and failure recovery solutions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2010:RAD,
author = "Xiaozhou Li and Mark Lillibridge and Mustafa Uysal",
title = "Reliability analysis of deduplicated and erasure-coded
storage",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "4--9",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925021",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kulkarni:2010:TAI,
author = "Milind Kulkarni and Vijay Pai and Derek Schuff",
title = "Towards architecture independent metrics for multicore
performance analysis",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "10--14",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925022",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shepard:2010:LMW,
author = "Clayton Shepard and Ahmad Rahmati and Chad Tossell and
Lin Zhong and Phillip Kortum",
title = "{LiveLab}: measuring wireless networks and smartphone
users in the field",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "15--20",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925023",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hahn:2010:UVL,
author = "Dongwoon Hahn and Ginnah Lee and Brenton Walker and
Matt Beecher and Padma Mundur",
title = "Using virtualization and live migration in a scalable
mobile wireless testbed",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "21--25",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925024",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shakkottai:2010:TCD,
author = "Srinivas Shakkottai and Lei Ying and Sankalp Sah",
title = "Targeted coupon distribution using social networks",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "26--30",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925025",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gopalakrishnan:2010:AVG,
author = "Ragavendran Gopalakrishnan and Jason R. Marden and
Adam Wierman",
title = "An architectural view of game theoretic control",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "31--36",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925026",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yao:2010:DDL,
author = "Zhongmei Yao and Daren B. H. Cline and Dmitri
Loguinov",
title = "In-degree dynamics of large-scale {P2P} systems",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "37--42",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925027",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joumblatt:2010:HAE,
author = "Diana Joumblatt and Renata Teixeira and Jaideep
Chandrashekar and Nina Taft",
title = "{HostView}: annotating end-host performance
measurements with user feedback",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "43--48",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Adhikari:2010:TMR,
author = "Vijay Kumar Adhikari and Sourabh Jain and Zhi-Li
Zhang",
title = "From traffic matrix to routing matrix: {PoP} level
traffic characteristics for a {Tier-1 ISP}",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "49--54",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925029",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2010:SIG,
author = "Martin Arlitt and Niklas Carlsson and Jerry Rolia",
title = "Special issue on the {2010 GreenMetrics workshop}",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "??--??",
month = dec,
year = "2010",
CODEN = "????",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnan:2010:VPM,
author = "Bhavani Krishnan and Hrishikesh Amur and Ada
Gavrilovska and Karsten Schwan",
title = "{VM} power metering: feasibility and challenges",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "56--60",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925031",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Phillips:2010:RAI,
author = "Steven Phillips and Sheryl L. Woodward and Mark D.
Feuer and Peter D. Magill",
title = "A regression approach to infer electricity consumption
of legacy telecom equipment",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "61--65",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925032",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sikdar:2010:EII,
author = "Biplab Sikdar",
title = "Environmental impact of {IEEE 802.11} access points: a
case study",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "66--70",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wireless local area networks have become an ubiquitous
means for network access in both residential and
commercial locations over the recent past. Given their
widespread deployment, it is of importance to
understand their environmental impact and this paper
presents a life cycle assessment of the energy
intensity of IEEE 802.11 wireless access points.
Following a cradle-to-grave approach, we evaluate the
energy consumed in the manufacture of access points
(including the extraction of raw materials, component
manufacturing, assembly, and transportation) as well as
during their actual usage. Our results show that the
manufacturing stage is responsible for a significant
fraction of the overall energy consumption. In light of
our findings, increasing the overall lifetime is one of
the recommended ways to reduce the environmental impact
of access points.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{V:2010:NDB,
author = "{Prabhakar T.V.} and {Akshay Uttama Nambi S.N.} and
{Jamadagni H.S.} and Krishna Swaroop and R. Venkatesha
Prasad and I. G. M. M. Niemegeers",
title = "A novel {DTN} based energy neutral transfer scheme for
energy harvested {WSN Gateways}",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "71--75",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lombardo:2010:AES,
author = "Alfio Lombardo and Carla Panarello and Giovanni
Schembra",
title = "Achieving energy savings and {QoS} in {Internet}
access routers",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "76--80",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925035",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bianzino:2010:AAF,
author = "Aruna Prem Bianzino and Anand Kishore Raju and Dario
Rossi",
title = "Apples-to-apples: a framework analysis for
energy-efficiency in networks",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "81--85",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925036",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Janssen:2011:USD,
author = "Curtis L. Janssen and Helgi Adalsteinsson and Joseph
P. Kenny",
title = "Using simulation to design extremescale applications
and architectures: programming model exploration",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "4--8",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964220",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Giles:2011:PAO,
author = "M. B. Giles and G. R. Mudalige and Z. Sharif and G.
Markall and P. H. J. Kelly",
title = "Performance analysis of the {OP2} framework on
many-core architectures",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "9--15",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964221",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Herdman:2011:BMP,
author = "J. A. Herdman and W. P. Gaudin and D. Turland and S.
D. Hammond",
title = "Benchmarking and modelling of {POWER7}, {Westmere},
{BG/P}, and {GPUs}: an industry case study",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "16--22",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964222",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Pennycook:2011:PAH,
author = "S. J. Pennycook and S. D. Hammond and S. A. Jarvis and
G. R. Mudalige",
title = "Performance analysis of a hybrid {MPI\slash CUDA}
implementation of the {NAS-LU} benchmark",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "23--29",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964223",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Budanur:2011:MTC,
author = "Sandeep Budanur and Frank Mueller and Todd Gamblin",
title = "Memory Trace Compression and Replay for {SPMD} Systems
using Extended {PRSDs}?",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "30--36",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964224",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Rodrigues:2011:SST,
author = "A. F. Rodrigues and K. S. Hemmert and B. W. Barrett
and C. Kersey and R. Oldfield and M. Weston and R.
Risen and J. Cook and P. Rosenfeld and E. CooperBalls
and B. Jacob",
title = "The {Structural Simulation Toolkit}",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "37--42",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964225",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Karlin:2011:PMP,
author = "Ian Karlin and Elizabeth Jessup and Geoffrey Belter
and Jeremy G. Siek",
title = "Parallel memory prediction for fused linear algebra
kernels",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "43--49",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964226",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Nakasato:2011:FGI,
author = "Naohito Nakasato",
title = "A fast {GEMM} implementation on the {Cypress GPU}",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "50--55",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964227",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Wu:2011:PCH,
author = "Xingfu Wu and Valerie Taylor",
title = "Performance characteristics of hybrid {MPI\slash
OpenMP} implementations of {NAS} parallel benchmarks
{SP} and {BT} on large-scale multicore supercomputers",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "56--62",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964228",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Hsieh:2011:FAL,
author = "Ming-yu Hsieh and Arun Rodrigues and Rolf Riesen and
Kevin Thompson and William Song",
title = "A framework for architecture-level power, area, and
thermal simulation and its application to
network-on-chip design exploration",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "63--68",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964229",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Perks:2011:SWW,
author = "O. Perks and S. D. Hammond and S. J. Pennycook and S.
A. Jarvis",
title = "Should we worry about memory loss?",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "69--74",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964230",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Cook:2011:SPM,
author = "Jeanine Cook and Jonathan Cook and Waleed Alkohlani",
title = "A statistical performance model of the {Opteron}
processor",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "75--80",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964231",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Tabbal:2011:PDE,
author = "Alexandre Tabbal and Matthew Anderson and Maciej
Brodowicz and Hartmut Kaiser and Thomas Sterling",
title = "Preliminary design examination of the {ParalleX}
system from a software and hardware perspective",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "81--87",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964232",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{McIntosh-Smith:2011:EAM,
author = "Simon McIntosh-Smith and Terry Wilson and Jon Crisp
and Amaurys {\'A}vila Ibarra and Richard B. Sessions",
title = "Energy-aware metrics for benchmarking heterogeneous
systems",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "88--94",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964233",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Chen:2011:MPR,
author = "Jian Chen and Lizy Kurian John and Dimitris
Kaseridis",
title = "Modeling program resource demand using inherent
program characteristics",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "1--12",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007118",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharifi:2011:MME,
author = "Akbar Sharifi and Shekhar Srikantaiah and Asit K.
Mishra and Mahmut Kandemir and Chita R. Das",
title = "{METE}: meeting end-to-end {QoS} in multicores through
system-wide resource management",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "13--24",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007119",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2011:SIC,
author = "Yuanrui Zhang and Mahmut Kandemir and Taylan Yemliha",
title = "Studying inter-core data reuse in multicores",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "25--36",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007120",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2011:SIH,
author = "Fang Liu and Yan Solihin",
title = "Studying the impact of hardware prefetching and
bandwidth partitioning in chip-multiprocessors",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "37--48",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007121",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alizadeh:2011:SAQ,
author = "Mohammad Alizadeh and Abdul Kabbani and Berk Atikoglu
and Balaji Prabhakar",
title = "Stability analysis of {QCN}: the averaging principle",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "49--60",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007123",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joseph:2011:SNM,
author = "Vinay Joseph and Gustavo de Veciana",
title = "Stochastic networks with multipath flow control:
impact of resource pools on flow-level performance and
network congestion",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "61--72",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007124",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alizadeh:2011:ADS,
author = "Mohammad Alizadeh and Adel Javanmard and Balaji
Prabhakar",
title = "Analysis of {DCTCP}: stability, convergence, and
fairness",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "73--84",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007125",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Suh:2011:SEB,
author = "Jinho Suh and Mehrtash Manoochehri and Murali
Annavaram and Michel Dubois",
title = "Soft error benchmarking of {L2} caches with {PARMA}",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "85--96",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007127",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Suchara:2011:NAJ,
author = "Martin Suchara and Dahai Xu and Robert Doverspike and
David Johnson and Jennifer Rexford",
title = "Network architecture for joint failure recovery and
traffic engineering",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "97--108",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007128",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Subhraveti:2011:RTP,
author = "Dinesh Subhraveti and Jason Nieh",
title = "Record and transplay: partial checkpointing for replay
debugging across heterogeneous systems",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "109--120",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007129",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsitsiklis:2011:PEL,
author = "John N. Tsitsiklis and Kuang Xu",
title = "On the power of (even a little) centralization in
distributed processing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "121--132",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007131",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:2011:WPA,
author = "Thanh Nguyen and Milan Vojnovic",
title = "Weighted proportional allocation",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "133--144",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007132",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aalto:2011:OTB,
author = "Samuli Aalto and Aleksi Penttinen and Pasi Lassila and
Prajwal Osti",
title = "On the optimal trade-off between {SRPT} and
opportunistic scheduling",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "145--155",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007133",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cohen:2011:SAS,
author = "Edith Cohen and Graham Cormode and Nick Duffield",
title = "Structure-aware sampling on data streams",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "157--168",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007135",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Korada:2011:GP,
author = "Satish Babu Korada and Andrea Montanari and Sewoong
Oh",
title = "Gossip {PCA}",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "169--180",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007136",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Urgaonkar:2011:OPC,
author = "Rahul Urgaonkar and Bhuvan Urgaonkar and Michael J.
Neely and Anand Sivasubramaniam",
title = "Optimal power cost management using stored energy in
data centers",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "181--192",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007138",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2011:GGL,
author = "Zhenhua Liu and Minghong Lin and Adam Wierman and
Steven H. Low and Lachlan L. H. Andrew",
title = "Greening geographical load balancing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "193--204",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007139",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:2011:SP,
author = "Giang T. K. Nguyen and Rachit Agarwal and Junda Liu
and Matthew Caesar and P. Brighten Godfrey and Scott
Shenker",
title = "Slick packets",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "205--216",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007141",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lam:2011:GRD,
author = "Simon S. Lam and Chen Qian",
title = "Geographic routing in $d$-dimensional spaces with
guaranteed delivery and low stretch",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "217--228",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007142",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rozner:2011:MDO,
author = "Eric Rozner and Mi Kyung Han and Lili Qiu and Yin
Zhang",
title = "Model-driven optimization of opportunistic routing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "229--240",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007143",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kurant:2011:WGM,
author = "Maciej Kurant and Minas Gjoka and Carter T. Butts and
Athina Markopoulou",
title = "Walking on a graph with a magnifying glass: stratified
sampling via weighted random walks",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "241--252",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007145",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anandkumar:2011:TDS,
author = "Animashree Anandkumar and Avinatan Hassidim and
Jonathan Kelner",
title = "Topology discovery of sparse random graphs with few
participants",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "253--264",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shafiq:2011:CMI,
author = "M. Zubair Shafiq and Lusheng Ji and Alex X. Liu and
Jia Wang",
title = "Characterizing and modeling {Internet} traffic
dynamics of cellular devices",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "265--276",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2011:CDN,
author = "Qiang Xu and Junxian Huang and Zhaoguang Wang and Feng
Qian and Alexandre Gerber and Zhuoqing Morley Mao",
title = "Cellular data network infrastructure characterization
and implication on mobile content placement",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "277--288",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007149",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2011:FGL,
author = "Myungjin Lee and Sharon Goldberg and Ramana Rao
Kompella and George Varghese",
title = "Fine-grained latency and loss measurements in the
presence of reordering",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "289--300",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007150",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2011:SOU,
author = "Xia Zhou and Stratis Ioannidis and Laurent Massoulie",
title = "On the stability and optimality of universal swarms",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "301--312",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007151",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eibl:2011:FBE,
author = "Patrick J. Eibl and Albert Meixner and Daniel J.
Sorin",
title = "An {FPGA}-based experimental evaluation of
microprocessor core error detection with {Argus-2}",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "313--314",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007153",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2011:RKD,
author = "Lele Zhang and Darryl Veitch and Kotagiri
Ramamohanarao",
title = "The role of {KL} divergence in anomaly detection",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "315--316",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007154",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krevat:2011:AIL,
author = "Elie Krevat and Tomer Shiran and Eric Anderson and
Joseph Tucek and Jay J. Wylie and Gregory R. Ganger",
title = "Applying idealized lower-bound runtime models to
understand inefficiencies in data-intensive computing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "317--318",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007155",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Han:2011:HPC,
author = "Jinyoung Han and Taejoong Chung and Seungbae Kim and
Ted Taekyoung Kwon and Hyun-chul Kim and Yanghee Choi",
title = "How prevalent is content bundling in {BitTorrent}?",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "319--320",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007156",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rao:2011:SAP,
author = "Jia Rao and Xiangping Bu and Kun Wang and Cheng-Zhong
Xu",
title = "Self-adaptive provisioning of virtualized resources in
cloud computing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "321--322",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007157",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2011:CAR,
author = "Chao Li and Amer Qouneh and Tao Li",
title = "Characterizing and analyzing renewable energy driven
data centers",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "323--324",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007158",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2011:TMB,
author = "Varun Gupta and Takayuki Osogami",
title = "Tight moments-based bounds for queueing systems",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "325--326",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007159",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2011:SMT,
author = "Suk-Bok Lee and Dan Pei and MohammadTaghi Hajiaghayi
and Ioannis Pefkianakis and Songwu Lu and He Yan and
Zihui Ge and Jennifer Yates and Mario Kosseifi",
title = "Scalable monitoring via threshold compression in a
large operational {$3$G} network",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "327--328",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007160",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Adhikari:2011:HDY,
author = "Vijay Kumar Adhikari and Sourabh Jain and Yingying
Chen and Zhi-Li Zhang",
title = "How do you '{Tube}'?",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "329--330",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007161",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kant:2011:CSB,
author = "Krishna Kant",
title = "A control scheme for batching {DRAM} requests to
improve power efficiency",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "331--332",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007162",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2011:ONS,
author = "Hao Zhang and Ziyu Shao and Minghua Chen and Kannan
Ramchandran",
title = "Optimal neighbor selection in {BitTorrent}-like
peer-to-peer networks",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "333--334",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007163",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ihm:2011:TUM,
author = "Sunghwan Ihm and Vivek S. Pai",
title = "Towards understanding modern {Web} traffic",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "335--336",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007164",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akella:2011:OIR,
author = "Aditya Akella and Shuchi Chawla and Holly Esquivel and
Chitra Muthukrishnan",
title = "De-ossifying {Internet} routing through intrinsic
support for end-network and {ISP} selfishness",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "337--338",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007165",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hong:2011:DSP,
author = "Yu-Ju Hong and Jiachen Xue and Mithuna Thottethodi",
title = "Dynamic server provisioning to minimize cost in an
{IaaS} cloud",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "339--340",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007166",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Srinivasan:2011:HHA,
author = "Sadagopan Srinivasan and Ravishankar Iyer and Li Zhao
and Ramesh Illikkal",
title = "{HeteroScouts}: hardware assist for {OS} scheduling in
heterogeneous {CMPs}",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "341--342",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007167",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ribeiro:2011:CCT,
author = "Bruno Ribeiro and Daniel Figueiredo and Edmundo {de
Souza e Silva} and Don Towsley",
title = "Characterizing continuous-time random walks on dynamic
networks",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "343--344",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007168",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2011:AAN,
author = "Jian Chen and Lizy Kurian John",
title = "Autocorrelation analysis: a new and improved method
for measuring branch predictability",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "345--346",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007169",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singh:2011:IGM,
author = "Satinder Pal Singh and Randolph Baden and Choon Lee
and Bobby Bhattacharjee and Richard La and Mark
Shayman",
title = "{IP} geolocation in metropolitan areas",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "347--348",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007170",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2011:TBS,
author = "Jay Chen and Janardhan Iyengar and Lakshminarayanan
Subramanian and Bryan Ford",
title = "{TCP} behavior in sub packet regimes",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "349--350",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007171",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bowden:2011:NLT,
author = "Rhys Alistair Bowden and Matthew Roughan and Nigel
Bean",
title = "Network link tomography and compressive sensing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "351--352",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007172",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gulati:2011:STM,
author = "Ajay Gulati and Irfan Ahmad",
title = "Storage technologies, management and troubleshooting
in virtualized datacenters",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "353--354",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007174",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sengupta:2011:CDC,
author = "Sudipta Sengupta",
title = "Cloud data center networks: technologies, trends, and
challenges",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "355--356",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007175",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2011:BAW,
author = "Giuliano Casale",
title = "Building accurate workload models using {Markovian}
arrival processes",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "357--358",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007176",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2011:NAC,
author = "Florin Ciucu",
title = "Non-asymptotic capacity and delay analysis of mobile
wireless networks",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "359--360",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007177",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elmokashfi:2011:SSI,
author = "Ahmed Elmokashfi and Amund Kvalbein and Constantine
Dovrolis",
title = "{SIMROT}: a scalable inter-domain routing toolbox",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "4--13",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034834",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sen:2011:CIH,
author = "Aritra Sen and Ankit Garg and Akshat Verma and Tapan
Nayak",
title = "{CloudBridge}: on integrated hardware-software
consolidation",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "14--25",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034835",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nair:2011:ENE,
author = "Jayakrishnan Nair and Adam Wierman and Bert Zwart",
title = "Exploiting network effects in the provisioning of
large scale systems",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "26--28",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034837",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nightingale:2011:PES,
author = "James Nightingale and Qi Wang and Christos Grecos",
title = "Performance evaluation of scalable video streaming in
multihomed mobile networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "29--31",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034838",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bouman:2011:DPB,
author = "N. Bouman and S. C. Borst and J. S. H. van
Leeuwaarden",
title = "Delay performance of backlog based random access",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "32--34",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034839",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shneer:2011:CSC,
author = "Seva Shneer and Peter M. van de Ven",
title = "Comparing slotted and continuous {CSMA}: throughputs
and fairness",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "35--37",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034840",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shvets:2011:AMI,
author = "Evgeny Shvets and Andrey Lyakhov and Alexander Safonov
and Evgeny Khorov",
title = "Analytical model of {IEEE 802.11s MCCAbased} streaming
in the presence of noise",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "38--40",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034841",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ayesta:2011:HTA,
author = "U. Ayesta and A. Izagirre and I. M. Verloop",
title = "Heavy traffic analysis of the discriminatory
random-order-of-service discipline",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "41--43",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034842",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boon:2011:QNS,
author = "M. A. A. Boon and R. D. van der Mei and E. M. M.
Winands",
title = "Queueing networks with a single shared server: light
and heavy traffic",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "44--46",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Frolkova:2011:FPA,
author = "Maria Frolkova and Josh Reed and Bert Zwart",
title = "Fixed-point approximations of bandwidth sharing
networks with rate constraints",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "47--49",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034844",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cano:2011:IPF,
author = "Maria Dolores Cano",
title = "Improving path failure detection in {SCTP} using
adaptive heartbeat time intervals",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "50--52",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varis:2011:NSB,
author = "Nuutti Varis and Jukka Manner",
title = "In the network: {Sandy Bridge} versus {Nehalem}",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "53--55",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anselmi:2011:EPS,
author = "Jonatha Anselmi Anselmi and Bruno Gaujal",
title = "On the efficiency of perfect simulation in monotone
queueing networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "56--58",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034847",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baryshnikov:2011:CLD,
author = "Y. M. Baryshnikov and E. G. Coffman and K. J. Kwak",
title = "{CAUCHY} localization: a distributed computation of
{WSNs}",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "59--61",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034848",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goga:2011:IFS,
author = "Oana Goga and Patrick Loiseau and Paulo
Gon{\c{c}}alves",
title = "On the impact of the flow size distribution's tail
index on network performance with {TCP} connections",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "62--64",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034849",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{VanHoudt:2011:LBP,
author = "B. {Van Houdt}",
title = "Load balancing and the power of preventive probing",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "65--67",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Altman:2011:PAC,
author = "Eitan Altman and Rachid {El Azouzi} and Daniel S.
Menasch{\'e} and Yuedong Xu",
title = "Poster: Aging control for smartphones in hybrid
networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "68--68",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034852",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bokharaei:2011:PTN,
author = "Hossein Kaffash Bokharaei and Yashar Ganjali and Ram
Keralapura and Antonio Nucci",
title = "Poster: Telephony network characterization for spammer
identification",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "69--69",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034853",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bosman:2011:POD,
author = "Joost Bosman and Rob van der Mei and Gerard Hoekstra",
title = "Poster: Optimal dispatching policies for parallel
processor sharing nodes with partial information",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "70--70",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034854",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dong:2011:PPS,
author = "Ke Dong and Diptanil DebBarma and R. Venkatesha and
Prasad Cheng Guo",
title = "Poster: Performance study of clustering of {Zigbee}
devices in {OPNET}",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "71--71",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034855",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lubben:2011:PCD,
author = "Ralf L{\"u}bben and Markus Fidler",
title = "Poster: On the capacity delay error tradeoff of source
coding",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "72--72",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034856",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marbukh:2011:PTE,
author = "Vladimir Marbukh",
title = "Poster: {Tcp} effective bandwidth and {Internet}
performance",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "73--73",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034857",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011 --- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Massey:2011:PSV,
author = "William A. Massey and Jamol Pender",
title = "Poster: Skewness variance approximation for dynamic
rate {MultiServer} queues with abandonment",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "74--74",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034858",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011 --- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rahman:2011:PGF,
author = "Ashikur Rahman and Carey Williamson",
title = "Poster: {$ \Delta $}-Graphs: flexible topology control
in wireless ad hoc networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "75--75",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034859",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011 --- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rahman:2011:PCM,
author = "Ashikur Rahman and Hanan Shpungin and Carey
Williamson",
title = "Poster: On capacity maximization in wireless relay
networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "76--76",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034860",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011 --- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Romano:2011:PSB,
author = "Paolo Romano and Matteo Leonetti",
title = "Poster: Selftuning batching in total order broadcast
via analytical modelling and reinforcement learning",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "77--77",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034861",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011 --- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yan:2011:PDV,
author = "Zhichao Yan and Dan Feng and Yujuan Tan",
title = "Poster: Dissection the version management schemes in
hardware transactional memory systems",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "78--78",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034862",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011 --- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Czekster:2011:EVD,
author = "Ricardo M. Czekster and Paulo Fernandes and Thais
Webber",
title = "Efficient vector-descriptor product exploiting
time-memory trade-offs!",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "2--9",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lilja:2011:PAS,
author = "David J. Lilja and Raffaela Mirandola and Kai Sachs",
title = "Paper abstracts of the second international conference
on performance engineering ({ICPE 2011})",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "2--9",
month = dec,
year = "2011",
CODEN = "????",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2011:IBT,
author = "Mark S. Squillante",
title = "Instrumentation-based tool for latency measurements
(abstracts only)",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "20--20",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papadimitriou:2011:PVR,
author = "Dimitri Papadimitriou and Florin Coras and Albert
Cabellos",
title = "Path-vector routing stability analysis",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "22--24",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160848",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhao:2011:DAS,
author = "Haotian Zhao and Yinlong Xu",
title = "A deterministic algorithm of single failed node
recovery in {MSR}-based distributed storage systems",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "25--27",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160849",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Garg:2011:RHD,
author = "Siddharth Garg and Shreyas Sundaram and Hiren D.
Patel",
title = "Robust heterogeneous data center design: a principled
approach",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "28--30",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tizghadam:2011:RWN,
author = "Ali Tizghadam and Alberto Leon-Garcia and Hassan
Naser",
title = "On robust wireless network optimization using network
criticality",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "31--33",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160851",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lelarge:2011:DCB,
author = "Marc Lelarge",
title = "Diffusion and cascading behavior in random networks",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "34--36",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160852",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abdelrahman:2011:SNH,
author = "Omer H. Abdelrahman and Erol Gelenbe",
title = "Search in non-homogeneous random environments?",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "37--39",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160853",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feng:2011:EPQ,
author = "Hanhua Feng and Parijat Dube and Li Zhang",
title = "On estimation problems for the {$ G / G / \infty $}
Queue",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "40--42",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160854",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Doroudi:2011:DIF,
author = "Sherwin Doroudi and Ragavendran Gopalakrishnan and
Adam Wierman",
title = "Dispatching to incentivize fast service in
multi-server queues",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "43--45",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160855",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akgun:2011:PPP,
author = "Osman T. Akgun and Rhonda Righter and Ronald Wolff",
title = "The power of partial power of two choices",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "46--48",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160856",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pal:2011:SLQ,
author = "Ranjan Pal and Sokol Kosta and Pan Hui",
title = "Settling for less: a {QoS} compromise mechanism for
opportunistic mobile networks",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "49--51",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160857",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2011:IEN,
author = "Zichao Yang and John C. S. Lui",
title = "Investigating the effect of node heterogeneity and
network externality on security adoption",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "52--54",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160858",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasche:2011:IPS,
author = "Daniel Sadoc Menasch{\'e} and Antonio A. de A. Rocha
and Edmundo A. {de Souza e Silva} and Don Towsley and
Rosa M. Meri Le{\"a}o",
title = "Implications of peer selection strategies by
publishers on the performance of {P2P} swarming
systems",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "55--57",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160859",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aalto:2011:HIA,
author = "Samuli Aalto and Pasi Lassila and Petri Savolainen and
Sasu Tarkoma",
title = "How impatience affects the performance and scalability
of {P2P} video-on-demand systems",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "58--60",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160860",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2011:PGW,
author = "Martin Arlitt and Niklas Carlsson and Jerry Rolia",
title = "{Proceedings of the 2011 GreenMetrics} workshop",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "58--60",
month = dec,
year = "2011",
CODEN = "????",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2011:GLB,
author = "Zhenhua Liu and Minghong Lin and Adam Wierman and
Steven H. Low and Lachlan L. H. Andrew",
title = "Geographical load balancing with renewables",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "62--66",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160862",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Altman:2011:TGC,
author = "Eitan Altman and Manjesh K. Hanawal and Rachid
ElAzouzi and Sholomo Shamai",
title = "Tradeoffs in green cellular networks",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "67--71",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160863",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sucevic:2011:PEE,
author = "Andrew Sucevic and Lachlan L. H. Andrew and Thuy T. T.
Nguyen",
title = "Powering down for energy efficient peer-to-peer file
distribution",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "72--76",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160864",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brown:2011:RPS,
author = "Michael Brown and Jose Renau",
title = "{ReRack}: power simulation for data centers with
renewable energy generation",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "77--81",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yan:2011:CRS,
author = "Feng Yan and Xenia Mountrouidou and Alma Riska and
Evgenia Smirni",
title = "Copy rate synchronization with performance guarantees
for work consolidation in storage clusters",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "82--86",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2011:APR,
author = "Vishal Gupta and Ripal Nathuji and Karsten Schwan",
title = "An analysis of power reduction in datacenters using
heterogeneous chip multiprocessors",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "87--91",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2011:HSS,
author = "Giuliano Casale and Ioan Raicu",
title = "{HPDC\slash SIGMETRICS} student research posters",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "92--96",
month = dec,
year = "2011",
CODEN = "????",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2011:UCG,
author = "Doron Chen and Ealan Henis and Ronen I. Kat and Dmitry
Sotnikov and Cinzia Cappiello and Alexandre Mello
Ferreira and Barbara Pernici and Monica Vitali and Tao
Jiang and Jia Liu and Alexander Kipp",
title = "Usage centric green performance indicators",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "92--96",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2011:BBH,
author = "Yuanrui Zhang and Jun Liu and Sai Prashanth
Muralidhara and Mahmut Kandemir",
title = "{BrickX}: building hybrid systems for recursive
computations",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "98--100",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160870",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blackburn:2011:CGS,
author = "Jeremy Blackburn and Ramanuja Simha and Clayton Long
and Xiang Zuo and Nicolas Kourtellis and John Skvoretz
and Adriana Iamnitchi",
title = "Cheaters in a gaming social network",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "101--103",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160871",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stefanek:2011:FCP,
author = "Anton Stefanek and Richard A. Hayden and Jeremy T.
Bradley",
title = "Fluid computation of the performance: energy tradeoff
in large scale {Markov} models",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "104--106",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160872",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2011:IHP,
author = "Shingyu Kim and Junghee Won and Hyuck Han and
Hyeonsang Eom and Heon Y. Yeom",
title = "Improving {Hadoop} performance in intercloud
environments",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "107--109",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160873",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2011:IPE,
author = "Yong Oh Lee",
title = "Improving performance and energy savings through
alternative forwarding",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "110--112",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160874",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choi:2011:IPM,
author = "Seungmi Choi and Shingyu Kim and Hyuck Han and Heon Y.
Yeom",
title = "Improving performance of {MapReduce} framework on
{InterCloud} by avoiding transmission of unnecessary
data",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "113--115",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160875",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gadre:2011:IMF,
author = "Hrishikesh Gadre and Ivan Rodero and Manish Parashar",
title = "Investigating {MapReduce} framework extensions for
efficient processing of geographically scattered
datasets",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "116--118",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160876",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hayden:2011:MFA,
author = "Richard A. Hayden",
title = "Mean-field approximations for performance models with
generally-timed transitions",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "119--121",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160877",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gandhi:2011:MMV,
author = "Rohan Gandhi and Dimitrios Koutsonikolas and Y.
Charlie Hu",
title = "Multicasting {MDC} videos to receivers with different
screen resolution",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "122--124",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160878",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sawalha:2011:TSH,
author = "Lina Sawalha and Monte P. Tull and Ronald D. Barnes",
title = "Thread scheduling for heterogeneous multicore
processors using phase identification",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "125--127",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160879",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2011:EDH,
author = "Tonglin Li and Raman Verma and Xi Duan and Hui Jin and
Ioan Raicu",
title = "Exploring distributed hash tables in {HighEnd}
computing",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "128--130",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160880",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burdette:2012:ECJ,
author = "Philip F. Burdette and William F. Jones and Brian C.
Blose and Gregory M. Kapfhammer",
title = "An empirical comparison of {Java} remote communication
primitives for intra-node data transmission",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "2--11",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185397",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a benchmarking suite that measures
the performance of using sockets and eXtensible Markup
Language remote procedure calls (XML-RPC) to exchange
intra-node messages between Java virtual machines
(JVMs). The paper also reports on an empirical study
comparing sockets and XML-RPC with response time
measurements from timers that use both operating system
tools and Java language instrumentation. By leveraging
packet filters inside the GNU/Linux kernel, the
benchmark suite also calculates network resource
consumption. Moreover, the framework interprets the
response time results in light of memory subsystem
metrics characterizing the behavior of the JVM. The
empirical findings indicate that sockets perform better
when transmitting small to very large objects, while
XML-RPC exhibits lower response time than sockets with
extremely large bulk data transfers. The experiments
reveal trade-offs in performance and thus represent the
first step towards determining if Java remote
communication primitives can support the efficient
exchange of intra-node messages.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gopalakrishnan:2012:SUT,
author = "Sathish Gopalakrishnan",
title = "Sharp utilization thresholds for some realtime
scheduling problems",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "12--22",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling policies for real-time systems exhibit
threshold behavior that is related to the utilization
of the task set they schedule, and in some cases this
threshold is sharp. A task set is considered
schedulable if it can be scheduled to meet all
associated deadlines. A schedulability test for a
chosen policy is a test of feasibility: given a task
set, can all deadlines be met? For the rate monotonic
scheduling policy, we show that periodic workload with
utilization less than a threshold U$_{RM}$ can be
scheduled almost surely and that all workload with
utilization greater than U$_{RM}$ is almost surely not
schedulable. We study such sharp threshold behavior in
the context of processor scheduling using static task
priorities, not only for periodic real-time tasks but
for aperiodic real-time tasks as well. The notion of a
utilization threshold provides a simple schedulability
test for most real-time applications. These results
improve our understanding of scheduling policies and
provide an interesting characterization of the typical
behavior of policies. The threshold is sharp (small
deviations around the threshold cause schedulability,
as a property, to appear or disappear) for most
policies; this is a happy consequence that can be used
to address the limitations of existing
utilization-based tests for schedulability. We
demonstrate the use of such an approach for balancing
power consumption with the need to meet deadlines in
web servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:2012:SLR,
author = "Edward G. Coffman",
title = "Synthesis of local-rule processes: successes and
challenges (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "24--24",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185400",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "How does one systematically program global
computations in systems of a vast number of components
restricted to local-rule interaction in a flat
hierarchy? This question has been around since the 50's
when cellular automata were introduced as models of
such systems. The question posed here is known as the
synthesis problem, and remains poorly understood. Terms
like self-assembling and self-organizing are often used
to describe computations on such systems. We mention a
number of instances of local-rule processes at widely
different scales in computer and network engineering:
molecular computation, sensor-network computation, and
computation on the Web. Typical performance questions
that we address include the convergence to useful,
non-degenerate behavior: does it always occur, and if
so, how long does it take?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kou:2012:FPT,
author = "Steven S. G. Kou",
title = "First passage times and option pricing under a
mixed-exponential jump diffusion model (abstract
only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "24--24",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper aims at extending the analytical
tractability of the Black--Scholes model to alternative
models with arbitrary jump size distributions. More
precisely, we propose a jump diffusion model for asset
prices whose jump sizes have a mixed-exponential
distribution, which is a weighted average of
exponential distributions but with possibly negative
weights. The new model extends existing models, such as
hyper-exponential and double-exponential jump diffusion
models, as the mixed-exponential distribution can
approximate any distribution as closely as possible,
including the normal distribution and various
heavy-tailed distributions. The mixed-exponential jump
diffusion model can lead to analytical solutions for
Laplace transforms of prices and sensitivity parameters
for path-dependent options such as lookback and barrier
options. The Laplace transforms can be inverted via the
Euler inversion algorithm. Numerical experiments
indicate that the formulae are easy to implement and
accurate. The analytical solutions are made possible
mainly because we solve a high-order
integro-differential equation related to first passage
times explicitly. A calibration example for SPY
options shows that the model can provide a reasonable
fit even for options with very short maturity, such as
one day. This is a joint work with Ning Cai at Hong
Kong Univ. of Science and Technology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Neuts:2012:AMS,
author = "Marcel F. Neuts",
title = "The algorithmization of mathematics: the story of
stochastic models (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "24--24",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185402",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:2012:PFD,
author = "Devavrat Shah",
title = "Product-form distributions and network algorithms
(abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "24--24",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185403",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ``product-form'' characterization of the
stationary distribution makes a queueing network
analytically a lot more tractable. This has been the
primary source of inspiration in the search for
``product-form'' characterization. In this talk, I will
discuss implications of ``product-form'' distributions
for algorithm design by means of two examples: (i)
intra-queue scheduling and (ii) inter-queue scheduling
in a constrained queueing network. Near the end of the
talk, by means of a novel comparison result between
stationary distributions of Markov chains, I will
briefly discuss the notion of ``approximate'' product-form
distributions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baek:2012:FPM,
author = "Jung Woo Baek and Ho Woo Lee and Se Won Lee and Soohan
Ahn",
title = "Factorization properties for a {MAP}-modulated fluid
flow model under server vacation policies (abstract
only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "25--25",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185404",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study a MAP-modulated fluid flow
model under generalized server vacation policies and
propose factorization properties that can be
efficiently used to derive the fluid level
distributions at an arbitrary time point. Our model is
an extension of the conventional Markov modulated fluid
flow (MMFF) model to control the server's idle state. We
consider two types of fluid increases: vertical
increase (Type-V) and linear increase (Type-L). We
first describe the MAP-modulated fluid flow model under
server vacation policies and prove the factorization
principle for each type. Based on the factorization
formulae, we derive recursive formulae for performance
measures. Lastly, some application examples of the
factorization property are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bladt:2012:BME,
author = "Mogens Bladt and Luz Judith R. Esparza and Bo Friis
Nielsen",
title = "Bilateral matrix-exponential distributions (abstract
only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "25--25",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this article we define the classes of bilateral and
multivariate bilateral matrix-exponential
distributions. These distributions have support on the
entire real space and have rational moment-generating
functions. These distributions extend the class of
bilateral phasetype distributions of [1] and the class
of multivariate matrix-exponential distributions of
[9]. We prove a characterization theorem stating that a
random variable has a bilateral multivariate
distribution if and only if all linear combinations of
the coordinates have a univariate bilateral
matrix-exponential distribution. As an application we
demonstrate that certain multivariate divisions, which
are governed by the underlying Markov jump process
generating a phase-type distribution, have a bilateral
matrix-exponential distribution at the time of
absorption, see also [4].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bladt:2012:MDP,
author = "Mogens Bladt and Bo Friis Nielsen",
title = "Moment distributions of phase-type (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "25--26",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185406",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Both matrix-exponential and phase-type distributions
have a number of important closure properties. Among
those are the distributions of the age and residual
life-time of a stationary renewal process with
inter-arrivals of either type. In this talk we show
that the spread, which is the sum of the age an
residual life-time, is also phase-type distributed.
Moreover, we give some explicit representations. The
spread is known to have a first order moment
distribution. If $X$ is a positive random variable with
density $f$ and $\mu_i$ is its $i$'th moment, then the
function $ f_i(x) = x^i f(x) / \mu_i $ is a density
function, and the corresponding distribution is called
the $i$'th order moment distribution.\par
We prove that the classes of matrix-exponential or
phase-type distributions are closed under the formation
of moment distributions of any order. Other
distributions which are closed under the formation of
moment distributions are e.g., log-normal, Pareto and
gamma distributions. We provide explicit
representations for both the matrix-exponential class
and for the phase-type distributions, where the latter
class may also use the former representations, but for
various reasons it is desirable to establish a
phase-type representation when dealing with phase-type
distributions.\par
For the first order distribution we present an explicit
formula for the related Lorenz curve and Gini index.
Moment distributions of orders one, two and three have
been extensively used in areas such as economy,
physics, demography and civil engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Drekic:2012:SPP,
author = "Steve Drekic and David Stanford and Douglas Woolford",
title = "A self-promoting priority model for transplant queues
(abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "26--26",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185407",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In many jurisdictions, organ allocation is done on the
basis of the health status of the patient, either
explicitly or implicitly. This presentation introduces a
matrix-analytic priority model in which customers
self-promote to the higher priority level, to take into
account changes in health status over time. In the
first variant, all patients arrive as ``regular''
customers to the queue, but as the health of a patient
degrades, their status is promoted to ``priority'' to
reflect the increased urgency of the transplant.
Performance measures such as the mean and distribution
of the time until transplant are obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fackrell:2012:CME,
author = "Mark Fackrell",
title = "Characterizing matrix-exponential distributions of
order $4$ (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "26--26",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Laplace--Stieltjes transform of a
matrix-exponential distribution is a rational function.
If there are no common factors between the numerator
and denominator polynomials, then the order of the
matrix-exponential distribution is the degree of the
denominator polynomial. Given a rational
Laplace--Stieltjes transform, it is unknown, in
general, when it corresponds to a matrix-exponential
distribution. Matrix-exponential distributions of order
3 have been completely characterized in this manner,
but in this talk we look at the problem of
characterizing matrix-exponential distributions of
order 4.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hautphenne:2012:EAM,
author = "Sophie Hautphenne",
title = "An {EM} algorithm for the model fitting of {Markovian}
binary trees (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "26--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185409",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markovian binary trees are a special class of
branching processes in which the lifetime of an
individual is controlled by a transient Markovian
arrival process. A Markovian binary tree is
characterized by the 4-tuple $ (\alpha, D_0, B, d) $,
where $\alpha$ is the vector of the initial phase
distribution of the first individual, $D_0$ is the
matrix of phase transition rates between birth and death
events, $B$ is the matrix of birth rates, and $d$ is the
vector of death rates. In order to use the Markovian
binary tree to model the evolution of a real population,
we need to determine the parameters $ (\alpha, D_0, B, d) $
from observations of that
population. In the absence of migration, the only
observable changes in a population are those associated
with a birth or a death event; no phase transition in
the underlying process can actually be seen. We are
thus dealing with a problem of parameter estimation
from incomplete data, and one way to solve this
statistical problem is to make use of the EM algorithm.
Our purpose here is thus to specify this algorithm to
the Markovian binary tree setting. In the first part of
this paper, we introduce a discrete time terminating
marked Markov arrival process (MMAP), based on which a
class of discrete multivariate phase-type (MPH)
distributions is defined. The discrete
MPH-distributions hold many of the properties possessed
by continuous MPH-distributions (Assaf, et al. (1983),
Kulkarni (1988), and O'Cinneide (1990)). It is known
that the joint distribution functions of continuous MPH
are fairly complicated and difficult to calculate. In
contrast, for the discrete MPH introduced here, we
provide recursive formulas for the joint probabilities and
explicit expressions for means, variances, and
co-variances.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hautphenne:2012:MTS,
author = "Sophie Hautphenne and Guy Latouche and Giang T.
Nguyen",
title = "{Markovian} trees subject to catastrophes: would they
survive forever? (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "27--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185410",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider multi-type Markovian branching processes
subject to catastrophes which kill random numbers of
living individuals at random epochs. It is well known
that the criterion for extinction of such a process is
related to the conditional growth rate of the
population, given the history of the process of
catastrophes, and that it is usually hard to evaluate.
We give a simple characterization in the case when all
individuals have the same probability of surviving a
catastrophe, and we determine upper and lower bounds in
the case where survival depends on the type of the
individual. The upper bound appears to be often much
tighter than the lower bound.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{He:2012:DMV,
author = "Qi-Ming He and Jiandong Ren",
title = "On a discrete multi-variate phase-type distribution
and its applications (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "27--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185411",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the second part of this paper, we use the discrete
MPH-distributions to model multi-variate insurance
claim processes in risk analysis, where claims may
arrive in batches, the arrivals of different types of
batches may be correlated, and the amounts of different
types of claims in a batch may be dependent. This
provides one natural approach to model the dependencies
among claim frequencies as well as claim sizes of
different types of risks, which is a very important
topic in insurance risk theory. Under certain
conditions, it is shown that the total amounts of
claims accumulated in some random time horizon are
discrete MPH random vectors. Matrix representations of
the discrete MPH-distributions are constructed
explicitly. Efficient computational methods are
developed for computing performance measures of the
total claims of different types of claim batches and
individual types of claims.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{He:2012:MEP,
author = "Qi-Ming He and Hanqin Zhang and Juan Vera",
title = "Majorization and {Extremal PH}-Distributions (abstract
only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "27--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185412",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents majorization results for
PH-generators. Based on the majorization results, Coxian
distributions are identified to be extremal
PH-distributions with respect to the first moment for
certain subsets of PH-distributions. Bounds on the
mean of phase-type distributions are found. In
addition, numerical results indicate that Coxian
distributions are extremal PH-distributions with
respect to the moment of any order for certain subsets
of PH-distributions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Horvath:2012:ARM,
author = "G{\'a}bor Horv{\'a}th and Mikl{\'o}s Telek",
title = "Acceptance-rejection methods for generating random
variates from matrix exponential distributions and
rational arrival processes (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "27--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185413",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic models based on matrix exponential
structures, like matrix exponential distributions and
rational arrival processes, have gained popularity in
analytical models recently. However, the application of
these models in simulation-based evaluations is not as
widespread yet. One of the possible reasons is the lack
of efficient random variates generation methods. In
this paper we propose methods for efficient random
variates generation for matrix exponential stochastic
models based on appropriate representations of the
models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kobayashi:2012:TAS,
author = "Masahiro Kobayashi and Yutaka Sakuma and Masakiyo
Miyazawa",
title = "Tail asymptotics of the stationary distribution for
{M/M-JSQ} with $k$ parallel queues (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "28--28",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185414",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a parallel queueing model which has k
identical servers. Assume that customers arrive from
outside according to a Poisson process and join the
shortest queue. Their service times have an i.i.d.
exponential distribution, which is referred to as an
M/M-JSQ with k parallel queues. We are interested in the
asymptotic behavior of the stationary distribution for
the shortest queue length of this model, provided that
stability holds. For this stationary distribution, it
can be conjectured that the tail decay rate is given by
the k-th power of the traffic intensity of the
corresponding M/M/k queue with a single waiting line.
We prove this fact by obtaining the exact geometric
asymptotics. For this, we use two
formulations. One is a quasi-birth-and-death (QBD for
short) process which is typically used, and the other
is a reflecting random walk on the boundary of the k +
1-dimensional orthant which is a key for our proof.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnamoorthy:2012:SDP,
author = "A. Krishnamoorthy and Viswanath C. Narayanan",
title = "Stochastic decomposition in production inventory with
service time (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "28--28",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185415",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study an $ (s, S) $ inventory system with positive
service time (for an overview of the work reported so
far in inventory with positive service time one may
refer to Krishnamoorthy, Lakshmi and Manikandan: A
survey on inventory models with positive service time,
OPSEARCH, DOI 10.1007/s12597-010-0032-z). This leads to
a queue of demands being formed. The process of demand
arrival constitutes a Poisson process. The duration of
each service is exponentially distributed. Our model is
a supply chain where items are added to the inventory
through a production process. Production starts each
time the inventory level drops to $s$ and remains on
until the inventory level reaches $S$; the times
required to add one unit of the item to the inventory
while production is on are independent, identically
distributed exponential random variables. Further, all
distributions involved in this paper are assumed to be
mutually independent. We assume that no customer joins
the queue when the inventory level is $0$. This
assumption leads us to an explicit product form
solution for the steady state probability vector, using
a simple approach. This is despite the fact that there
is a strong correlation between lead time (the time
required to add an item into the inventory) and the
number of customers joining the queue during the lead
time (except when the inventory level is zero during
which time no customer joins the queue). The technique
is to combine the steady state probability vector of
the classical M/M/1 queue and that of the production
inventory system where each service requires negligible
time and no backlogs are allowed. Using a similar
technique, the expected length of a production cycle is
also obtained explicitly. The optimality of the highest
inventory level $S$ and the production switching-on
level $s$ has been studied using a cost function
constructed using the steady state system performance
measures. Since we have obtained explicit expressions
for these measures, analytic expressions have been
derived for the optimal values of $S$ and $s$.\par
To show that our method can be applied to other similar
problems, we analyze in detail a variant of the above
problem (discussed in Schwarz M, Sauer C, Daduna H,
Kulik R and Szekli R: M/M/1 Queueing systems with
inventory, {\em Queueing Systems}, 54, 55--78, 2006).
For that model, we assume that in a production run,
production occurs only once in a cycle and the amount
produced is sufficient to take the inventory level back
to $S$. A brief discussion on the application of our
method to inventory system with lead time for
replenishment has also been provided.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Latouche:2012:TDF,
author = "Guy Latouche and Giang T. Nguyen and Zbigniew
Palmowski",
title = "Two-dimensional fluid queues with temporary assistance
(abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "28--28",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185416",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a two-dimensional stochastic fluid model
with N ON-OFF inputs and temporary assistance, which is
an extension of the same model with N = 1 in
Mahabhashyam et al. (2008). The rates of change of both
buffers are piecewise constant and dependent on the
underlying Markovian phase of the model, and the rates
of change for Buffer 2 are also dependent on the
specific level of Buffer 1. This is because both
buffers share a fixed output capacity, the precise
proportion of which depends on Buffer 1. The
generalization of the number of ON-OFF inputs
necessitates modifications in the original rules of
output-capacity sharing from Mahabhashyam et al. (2008)
and considerably complicates both the theoretical
analysis and the numerical computation of various
performance measures. We derive the marginal
probability distribution of Buffer 1, and bounds for
that of Buffer 2. Furthermore, restricting Buffer 1 to
a finite size, we determine its marginal probability
distribution in the specific case of N = 1, thus
providing numerical comparisons to the corresponding
results in Mahabhashyam et al. (2008) where Buffer 1 is
assumed to be infinite.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramaswami:2012:FIB,
author = "V. Ramaswami",
title = "A fluid introduction to {Brownian} motion \&
stochastic integration (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "29--29",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185417",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This presentation provides an introduction to Brownian
motion and stochastic integrals using linear fluid
flows on finite state Markov chains. Many numerical
examples are presented setting the stage for the
development of algorithms for stochastic integration
via the well-studied and easily understood fluid flow
models driven by finite state Markov chains.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sonenberg:2012:NFM,
author = "Nikki Sonenberg and Peter G. Taylor",
title = "A network of fluid models and its application in
{MANETs} (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "29--29",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185418",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ad hoc mobile networks are peer-to-peer systems whose
successful operation depends on the nodes contributing
the resources of their device. Nodes rely on portable
energy sources, for example batteries, to transmit to
each other. For the network to function, either the
nodes willingly cooperate or their behaviour is
influenced by an incentive mechanism. Building on work
by Latouche and Taylor (2009) and assuming finite
capacity buffers, we model each user's battery energy
and credit balance as fluids, with the rate of increase
or decrease of the fluid modulated by the network call
occupancy. This results in a network of stochastic
fluid models, which we analyse using a reduced-load
approach. We study the resources required to ensure the
network can maintain itself without having to drop
calls and investigate the design of a credit incentive
mechanism to discourage uncooperative behaviour in the
sharing of resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stanford:2012:NPP,
author = "David Stanford and Peter G. Taylor and Ilze Ziedins",
title = "A new paradigm for priority patient selection
(abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "29--29",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185419",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The central purpose of this work is to bridge the gap
between two aspects of health care systems: (1) Key
Performance Indicators (KPIs) for delay in access to
care for patient classes, with differing levels of
acuity or urgency, specify the fraction of patients
needing to be seen by some key time point. (2) Patient
classes present themselves for care, and consume health
care resources, in a fashion that is totally
independent of the KPIs. Rather, they present in a
manner determined by the prevalence of the medical
condition, at a rate that may vary over time. Treatment
times will likewise be determined by medical need and
current practice. There is no reason to expect the
resulting system performance will adhere to the
specified KPIs. This work presents a new
paradigm for priority assignment that enables one to
fine-tune the system in order to achieve the delay
targets, assuming sufficient capacity exists for at
least one such arrangement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Toyoizumi:2012:ADS,
author = "Hiroshi Toyoizumi and Jeremy Field",
title = "Analysis of the dynamics of social queues by
quasi-birth-and-death processes (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "29--30",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185420",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A wide variety of animals are known to form simple
hierarchical groups called social queues, where
individuals inherit resources or social status in a
predictable order. Queues are often age-based, so that
a new individual joins the end of the queue on reaching
adulthood, and must wait for older individuals to die
in order to reach the front of the queue. While
waiting, an individual may work for her group, in the
process often risking her own survival and hence her
chance of inheritance. Eventually, she may survive to
reach the head of the queue and become the dominant of
the group. Queueing has been particularly well-studied
in hover wasps (Hymenoptera: Stenogastrinae). In hover
wasp social groups, only one female lays eggs, and
there is a strict, age-based queue to inherit the
reproductive position. While the dominant individual
(queen) concentrates on breeding, subordinate helpers
risk death by foraging outside the nest, but have a
slim chance of eventually inheriting dominance. Some
explanations for this altruistic behavior and for the
stability of social queues have been proposed and
analyzed [1, 2]. Since both the productivity of the
nest and the chance to inherit the dominant position
depend critically on group size, queueing dynamics are
crucial for understanding social queues, but detailed
analysis is lacking. Here, using hover wasps as an
example, we demonstrate that Little's formula [3] and
quasi-birth-and-death (QBD) processes are useful
for analyzing queueing dynamics
and the population demographics of social queues. Let
(L(t),M(t)) be the number of adults and brood (eggs,
larvae and pupae) in a nest at time t. We model the
vector (L(t),M(t)) as a QBD process starting from the
state (L(0),M(0)) = (1, 0) to analyze the nest history
of a social queue. The boundary state {L(t) = 0}, which
corresponds to the termination of the nest, is regarded
as the taboo state of this QBD process. Let Q be the
transition rate matrix of the taboo process. By
choosing different Q, we can set various conditions for
the social queue. By using standard techniques such as
calculating $ Q^{-1} $, we can estimate and compare the
productivity of the nest in a wide variety of social
queues under different queueing and environmental
conditions. Our work leads to a better understanding of
how environmental conditions and strategic
decision-making by individuals interact to produce the
observed group dynamics; and in turn, how group
dynamics affects individual decision-making.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
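
The abstract above appeals to the standard device of computing the
inverse of the taboo-process generator. A minimal sketch of that
computation follows; the sub-generator and the per-state
productivity rates are made-up numbers, not the hover-wasp model of
the paper.

# Illustrative sketch only: for a taboo (transient) CTMC with sub-generator Q,
# entry (i, j) of inv(-Q) is the expected total time spent in state j before
# the taboo set is left when starting in state i; weighting these times by
# per-state productivity rates gives an expected total output.
import numpy as np

Q = np.array([[-1.0, 0.6],          # sub-generator of the taboo process; the row
              [ 0.3, -0.8]])        # deficit is the rate of leaving the taboo set
reward_rate = np.array([2.0, 0.5])  # hypothetical productivity per unit time in each state

expected_time = np.linalg.inv(-Q)                 # expected sojourn times before termination
expected_output = expected_time.dot(reward_rate)  # one value per starting state
print(expected_output)
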
@Article{VanHoudt:2012:IDD,
author = "B. {Van Houdt} and J. F. P{\'e}rez",
title = "The impact of dampening demand variability in a
production\slash inventory system with multiple
retailers (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "30--30",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185421",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a supply chain consisting of a single
manufacturer and two retailers. The manufacturer
produces goods on a make-to-order basis, while both
retailers maintain an inventory and use a periodic
replenishment rule. As opposed to the traditional $ (r,
S) $ policy, where a retailer at the end of each period
orders the demand seen during the previous period, we
assume that the retailers dampen their demand
variability by smoothing the order size. More
specifically, the order placed at the end of a period
is equal to $ \beta $ times the demand seen during the
last period plus $ (1 - \beta) $ times the previous
order size, with $ \beta \in (0, 1] $ the smoothing
parameter. We develop a GI/M/1-type Markov chain with
only two nonzero blocks $ A_0 $ and $ A_d $ to analyze
this supply chain. The dimension of these blocks
prohibits us from computing its rate matrix R in order
to obtain the steady state probabilities. Instead we
rely on fast numerical methods that exploit the
structure of the matrices $ A_0 $ and $ A_d $, i.e.,
the power method, the Gauss--Seidel iteration and
GMRES, to approximate the steady state probabilities.
Finally, we provide various numerical examples that
indicate that the smoothing parameters can be set in
such a manner that all the involved parties benefit
from smoothing. We consider both homogeneous and
heterogeneous settings for the smoothing parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
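
The ordering rule described above is a one-line recursion; the toy
sketch below (with a synthetic Poisson demand stream, not data from
the paper) simply shows how the smoothing parameter beta dampens
order variability, leaving the GI/M/1-type chain analysis itself
aside.

# Illustrative sketch only: the smoothed replenishment rule
#   order_t = beta * demand_{t-1} + (1 - beta) * order_{t-1},  beta in (0, 1],
# applied to a synthetic demand stream; smaller beta gives lower order variance.
import numpy as np

def smoothed_orders(demand, beta):
    orders = [float(demand[0])]                   # initialize with the first demand
    for d in demand[:-1]:
        orders.append(beta * d + (1 - beta) * orders[-1])
    return np.array(orders)

rng = np.random.default_rng(0)
demand = rng.poisson(10, size=10_000)
for beta in (1.0, 0.5, 0.2):
    print(beta, smoothed_orders(demand, beta).var())
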
@Article{Bean:2012:AQR,
author = "Nigel G. Bean and Bo Friis Nielsen",
title = "Analysis of queues with rational arrival process
components: a general approach",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "31--31",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185422",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bean:2012:SFM,
author = "Nigel G. Bean and Ma{\l}gorzata M. O'Reilly",
title = "A stochastic fluid model driven by an
uncountable-state process, which is a stochastic fluid
model itself: the stochastic fluid-fluid model",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "32--32",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185423",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bini:2012:CCR,
author = "Dario A. Bini and Paola Favati and Beatrice Meini",
title = "A compressed cyclic reduction for {QBDs} with low rank
upper and lower transitions",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "33--33",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185424",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consider a quasi-birth-and-death (QBD) Markov chain
[6], having a block tridiagonal probability transition
matrix with blocks $ B_i $, $ A_i $, $ i = -1, 0, 1 $,
which are $ m \times m $ matrices. In the numerical
solution of QBD Markov chains a crucial step is the
efficient computation of the minimal nonnegative
solution $R$ of the quadratic matrix equation
$ X = X^2 A_{-1} + X A_0 + A_1 $. To this purpose, many
numerical methods, with different properties, have been
designed in recent years (see for instance [1, 2, 3,
4]). However, many of these numerical methods are
defined for general block coefficients $ A_{-1} $,
$ A_0 $ and $ A_1 $, and do not exploit the possible
structure of these blocks. Recently, some attention has
been devoted to the case where $ A_{-1} $ has only a
few non-null columns, or $ A_1 $ has only a few
non-null rows. These properties are satisfied when the
QBD has restricted transitions to higher (or lower)
levels. In particular, in [7] the authors exploit these
properties of the matrix $ A_{-1} $, or $ A_1 $, to
formulate the QBD in terms of an M/G/1 type Markov
chain, where the block matrices have size smaller than
$m$; in particular, when both $ A_{-1} $ and $ A_1 $
have the desired property, the latter M/G/1 type Markov
chain reduces to a QBD. In [5] the structure of
$ A_{-1} $ is used in order to reduce the computational
cost of some algorithms for computing $R$. Here we
assume that both the matrices $ A_{-1} $ and $ A_1 $
have small rank with respect to their size $m$. In
particular, if $ A_{-1} $ and $ A_1 $ have only a few
non-null columns and rows, respectively, they have
small rank. We show that, under this assumption, the
matrix $R$ can be computed by using the cyclic
reduction algorithm, where the matrices $ A^{(k)}_i $,
$ i = -1, 0, 1 $, generated at the $k$th step of the
algorithm, can be represented by small rank matrices.
In particular, if $ r_{-1} $ is the rank of $ A_{-1} $,
and if $ r_1 $ is the rank of $ A_1 $, then each step
of cyclic reduction can be performed by means of
$ O((r_{-1} + r_1)^3) $ arithmetic operations. This
cost estimate must be compared with the cost of
$ O(m^3) $ arithmetic operations, needed without
exploiting the structure of $ A_{-1} $ and $ A_1 $.
Therefore, if $ r_{-1} $ and $ r_1 $ are much smaller
than $m$, the advantage is evident. It remains an open
issue to understand how the structure can be exploited
in the case where only one of $ A_{-1} $ and $ A_1 $
has low rank.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
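
For readers who want to experiment, the following is the plain
functional iteration for the minimal nonnegative solution R of
X = X^2 A_{-1} + X A_0 + A_1, i.e. the unstructured baseline that
the compressed cyclic reduction of the paper accelerates; it is not
the paper's algorithm, and the blocks are arbitrary substochastic
matrices chosen so the QBD is positive recurrent.

# Illustrative baseline only (not compressed cyclic reduction): the classical
# functional iteration R_{n+1} = R_n^2 A_{-1} + R_n A_0 + A_1, started from 0,
# converges monotonically to the minimal nonnegative solution R of a positive
# recurrent discrete-time QBD.
import numpy as np

A_minus1 = np.array([[0.3, 0.1], [0.2, 0.2]])
A_0      = np.array([[0.2, 0.2], [0.1, 0.3]])
A_1      = np.array([[0.1, 0.1], [0.1, 0.1]])   # A_minus1 + A_0 + A_1 is row-stochastic

R = np.zeros_like(A_0)
for _ in range(5000):
    R_next = R.dot(R).dot(A_minus1) + R.dot(A_0) + A_1
    if np.max(np.abs(R_next - R)) < 1e-12:
        R = R_next
        break
    R = R_next
print(R)
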
@Article{Bladt:2012:OMG,
author = "Mogens Bladt and Bo Friis Nielsen",
title = "An overview of multivariate gamma distributions as
seen from a (multivariate) matrix exponential
perspective",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "34--34",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185425",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Numerous definitions of multivariate exponential and
gamma distributions can be retrieved from the
literature [4]. These distributions belong to the class
of Multivariate Matrix-Exponential Distributions
(MVME) whenever their joint Laplace transform is a
rational function. The majority of these distributions
further belongs to an important subclass of MVME
distributions [5, 1] where the multivariate random
vector can be interpreted as a number of simultaneously
collected rewards during sojourns in the states of a
Markov chain with one absorbing state, the rest of the
states being transient. We present the corresponding
representations for all such distributions. In this way
we obtain a unification of the variety of existing
distributions as well as a deeper understanding of
their probabilistic nature and a clarification of their
similarities and differences. In particular one may
easily generalize or combine any of the known
distributions by modifying the generators adequately.
Also, it is straightforward to simulate from this
class. Thus, by identifying distributions as belonging
to this subclass it becomes apparent how to simulate
from most previously discussed distributions with
rational Laplace transform. In a longer perspective
stochastic and statistical analysis for MVME will in
particular apply to any of the previously defined
distributions. Multivariate gamma distributions have
been used in a variety of fields like hydrology [11],
[10], [6], space (wind modeling) [9], reliability [3],
[7], traffic modeling [8], and finance [2]. It is our
hope that the paper will assist practitioners in
formulating and analyzing models in a much more
transparent and easily accessible way.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
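
The abstract above notes that simulation from this MVME subclass is
straightforward: run the absorbing Markov chain and accumulate each
reward coordinate at a state-dependent rate during every sojourn. A
minimal sketch of that construction follows; the sub-generator,
initial law and reward rates are made-up illustrative values.

# Illustrative sketch only: simulate one multivariate reward vector by drawing
# a path of an absorbing CTMC and accumulating, per coordinate, reward at a
# state-dependent rate during each sojourn.
import numpy as np

S = np.array([[-2.0, 1.0],        # sub-generator over the transient states
              [ 0.5, -1.5]])
alpha = np.array([0.7, 0.3])      # initial distribution over the transient states
Rw = np.array([[1.0, 0.0],        # reward rates: rows = states, columns = coordinates
               [0.5, 2.0]])

def sample_mvme(rng=np.random.default_rng()):
    exit_rate = -S.sum(axis=1)                      # absorption rate out of each state
    rewards = np.zeros(Rw.shape[1])
    state = rng.choice(len(alpha), p=alpha)
    while True:
        hold = rng.exponential(-1.0 / S[state, state])
        rewards += Rw[state] * hold                 # accumulate rewards during the sojourn
        p_next = np.append(np.maximum(S[state], 0.0),
                           exit_rate[state]) / -S[state, state]
        nxt = rng.choice(len(p_next), p=p_next)
        if nxt == len(alpha):                       # absorbed: return the reward vector
            return rewards
        state = nxt

samples = np.array([sample_mvme() for _ in range(5)])
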
@Article{Blanchet:2012:RES,
author = "Jose Blanchet and Jing Dong",
title = "Rare-event simulation for multi-server queues in the
{Halfin--Whitt} regime",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "35--35",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185426",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2012:PFA,
author = "Giuliano Casale and Peter G. Harrison and Maria Grazia
Vigliotti",
title = "Product-form approximation of queueing networks with
phase-type service",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "36--36",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185427",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dai:2012:NAD,
author = "J. G. Dai and Shuangchi He",
title = "Numerical analysis for diffusion models of {GI/Ph/n
$+$ GI} queues",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "37--37",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185428",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denardo:2012:SFM,
author = "Eric V. Denardo and Eugene A. Feinberg and Uriel G.
Rothblum",
title = "Splitting in a finite {Markov} decision problem",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "38--38",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185429",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denardo:2012:MAB,
author = "Eric V. Denardo and Eugene A. Feinberg and Uriel G.
Rothblum",
title = "The multi-armed bandit, with constraints",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "39--39",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185430",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The colorfully-named and much-studied multi-armed
bandit is the following Markov decision problem: At
epochs 1, 2, \ldots{}, a decision maker observes the
current state of each of several Markov chains with
rewards (bandits) and plays one of them. The Markov
chains that are not played remain in their current
states. The Markov chain that is played evolves for one
transition according to its transition probabilities,
earning an immediate reward (possibly negative) that
can depend upon its current state and on the state to
which transition occurs. Henceforth, to distinguish the
states of the individual Markov chains from those of
the Markov decision problem, the latter are called
multi-states. Each multi-state prescribes a state for
each of the Markov chains. This version of the
multi-armed bandit problem was originally solved by
John Gittins. It has a large range of operations
research applications including applications to
resource allocation, scheduling, project management,
and search. A key result for the multi-armed bandit is
that attention can be restricted to a simple class of
decision procedures. A label is assigned to each state
of each bandit such that no two states have the same
label, even if they are in different bandits. A
priority rule is a policy that, given each multi-state,
plays the Markov chain whose current state has the
lowest label. The literature includes several different
proofs of the optimality of a priority rule. Nearly all
of these proofs rest on a family of optimal stopping
times, one for each state of each bandit. A different
approach is taken here. Pair-wise comparison, rather
than optimal stopping, is used to demonstrate the
optimality of a priority rule. This is accomplished for
models having linear and exponential utility functions.
Elementary row operations are used to identify an
optimal priority rule and to compute its expected
utility for a given starting state. Our analysis covers
the cases of linear and exponential utilities. In the
case of a linear utility function, the model is
generalized to include constraints that link the
bandits. With C constraints, an optimal policy is shown
to take the form of an initial randomization over C + 1
priority rules, and column generation is proposed as a
solution method. The proposed computational methods are
based on several matrix algorithms. First, an
algorithm, called the Triangularizer, transforms the
one-step rewards and transition probability matrices
for individual bandits by applying elementary row
operations. The transformed matrices, called finalized,
are triangular: all their elements on and below the
diagonal are equal to zero. For a given index policy,
running the transformed bandits is equivalent to
running the original bandits. Second, the transition
probabilities and one-step rewards of the transformed
bandits are used to compute the performance
characteristics of index policies in polynomial time.
These computations are used by the column generation
algorithm for multi-armed bandits with constraints.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
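
To make the notion of a priority rule concrete, here is a tiny
operational sketch: each state of each bandit carries a globally
distinct label, and at every epoch the bandit whose current state
has the lowest label is played while the others stay frozen. The
chains, rewards and labels are arbitrary, and the Triangularizer
and column-generation machinery of the paper are not implemented.

# Illustrative sketch only: playing a multi-armed bandit with a priority rule
# (the bandit whose current state has the lowest label is played; unplayed
# chains do not move).
import numpy as np

rng = np.random.default_rng(0)
P = [np.array([[0.9, 0.1], [0.4, 0.6]]),            # transition matrix of bandit 0
     np.array([[0.5, 0.5], [0.2, 0.8]])]            # transition matrix of bandit 1
r = [np.array([1.0, 0.2]), np.array([0.7, 0.3])]    # state rewards per bandit
label = [np.array([0, 3]), np.array([1, 2])]        # globally distinct state labels

state = [0, 0]
total = 0.0
for epoch in range(10):
    arm = min(range(len(P)), key=lambda a: label[a][state[a]])  # priority rule
    total += r[arm][state[arm]]
    state[arm] = rng.choice(2, p=P[arm][state[arm]])            # only the played chain moves
print(total)
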
@Article{Dendievel:2012:SDP,
author = "S. Dendievel and G. Latouche and M-A. Remiche",
title = "Stationary distribution of a perturbed {QBD} process",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "40--40",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185431",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider Quasi-Birth-and-Death processes and our
purpose is to assess the impact of small variations of
the initial parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Govorun:2012:PRP,
author = "Maria Govorun and Guy Latouche and Marie-Ange
Remiche",
title = "Profits and risks of pension plans",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "41--41",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185432",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kobayashi:2012:RTA,
author = "Masahiro Kobayashi and Masakiyo Miyazawa",
title = "Revisit to the tail asymptotics of the double {QBD}
process by the analytic function method",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "42--42",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185433",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2012:FMM,
author = "Yunan Liu and Ward Whitt",
title = "A fluid model for many-server queues with time-varying
arrivals and phase-type service distribution",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "43--43",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185434",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Margolius:2012:NSM,
author = "Barbara Margolius",
title = "Numerical study of {Markovian} arrival processes
{(MAP)} with time-varying periodic arrival rates",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "44--44",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185435",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the arrival rate of a Markovian Arrival
Process with time-varying periodic transition rates.
The arrival rate can vary widely for a MAP with fixed
average transition rates by selecting appropriate
transition rate functions over the period.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
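
As background to the abstract above: for a time-homogeneous MAP
with matrices (D0, D1), the fundamental arrival rate is theta D1 1,
where theta is the stationary vector of the generator D0 + D1; the
paper's point is that periodic, time-varying versions can realize
widely different rate profiles for the same averages. The matrices
below are illustrative only.

# Illustrative sketch only: fundamental arrival rate of a time-homogeneous MAP
# (D0, D1): solve theta (D0 + D1) = 0 with sum(theta) = 1, then
# lambda = theta D1 1.
import numpy as np

D0 = np.array([[-3.0, 1.0],
               [ 0.5, -2.0]])
D1 = np.array([[ 1.5, 0.5],
               [ 0.5, 1.0]])     # rows of D0 + D1 sum to zero

Q = D0 + D1                                   # generator of the phase process
A = np.vstack([Q.T, np.ones(Q.shape[0])])     # append the normalization constraint
b = np.append(np.zeros(Q.shape[0]), 1.0)
theta, *_ = np.linalg.lstsq(A, b, rcond=None)
lam = theta.dot(D1).dot(np.ones(D1.shape[0]))
print(theta, lam)
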
@Article{OReilly:2012:SDF,
author = "Ma{\l}gorzata M. O'Reilly and Nigel G. Bean",
title = "Stochastic 2-dimensional fluid model",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "45--45",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185436",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bini:2012:SQM,
author = "D. Bini and B. Meini and S. Steff{\'e} and J. F.
P{\'e}rez and B. {Van Houdt}",
title = "{SMCSolver} and {Q-MAM}: tools for matrix-analytic
methods",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "46--46",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185437",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Matrix-analytic methods have advanced considerably
since the pioneering work of Marcel Neuts [6, 5] on
Quasi-Birth-Death (QBD), GI/M/1- and M/G/1- type Markov
chains (MCs). In particular, the algorithms used to
(iteratively) solve these structured Markov chains have
matured a lot, which has resulted in more efficient,
but also more complex algorithms [4, 1]. While the
first algorithms were straightforward to implement---as
they were based on simple functional iterations---more
advanced algorithms/features like cyclic-reduction, the
Newton iteration or the shift technique (to accelerate
convergence), require more effort; in particular for
GI/M/1- and M/G/1-type Markov chains. This has
motivated us to develop the Structured Markov Chain
Solver (SMCSolver) tool [2], which implements a large
number of basic and more advanced algorithms for
solving QBD, GI/M/1- and M/G/1-type MCs (as well as
the more general Non-Skip-Free M/G/1-type MCs). The
MATLAB version of the tool consists of a collection of
MATLAB functions, while the Fortran version is
accompanied by a graphical user-interface (GUI). Apart
from making these more advanced algorithms accessible
to non-specialists, the tool is also useful as a
platform for the development and study of new
algorithms and acceleration techniques. Since its
initial release in 2006, various extensions have been
made. In [3] different transformation techniques and
shift strategies are incorporated in order to speed up
and optimize the algorithms, while even more recently
an efficient Newton iteration for GI/M/1- and
M/G/1-type Markov chains was included [8].
Matrix-analytic methods have also been very effective
in the analysis of many queueing systems in both
discrete- and continuous-time. The Q-MAM tool [7] is a
collection of MATLAB functions that allows one to
compute the queue length, waiting time and delay
distribution of various queueing systems of infinite
size. It includes amongst others implementations of the
PH/PH/1, MAP/MAP/1, MAP/M/c, MAP/D/c, RAP/RAP/1,
MMAP[K]/PH[K]/1, MMAP[K]/SM[K]/1, SM[K]/PH[K]/1 (many
in both discrete- and continuous-time), where
state-of-the-art solution techniques are used to solve
these models efficiently. The MATLAB version of the
SMCSolver and Q-MAM tool is available at
http://win.ua.ac.be/\%7Evanhoudt/ while the Fortran 90
version of the SMCSolver tool with the GUI can be
downloaded from http://bezout.dm.unipi.it/SMCSolver.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2012:KTF,
author = "Giuliano Casale and Evgenia Smirni",
title = "{KPC-toolbox}: fitting {Markovian} arrival processes
and phase-type distributions with {MATLAB}",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "47--47",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185438",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cote:2012:JPS,
author = "Marco Cote and German Riano and Raha
Akhavan-Tabatabaei and Juan Fernando Perez and Andres
Sarmiento and Julio Goez",
title = "{jMarkov} package: a stochastic modeling tool",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "48--48",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185439",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When analyzing real-life stochastic systems, in most
cases it is easier, cheaper and more effective to use
analytical models rather than studying the physical
system or a simulation model of it. Stochastic
modeling is a powerful tool that helps in the analysis
and optimization of stochastic systems. However, the
use of stochastic modeling is not widespread in today's
industries and among practitioners. This lack of
acceptance is caused by two main reasons, the first
being the curse of dimensionality, which is defined by
the number of states required to describe a system.
This number grows exponentially as the size of the
system increases. The second reason is the lack of
user-friendly and efficient software packages that
allow the modeling of the problem without involving the
user with the implementation of the solution algorithms
to solve it. The curse of dimensionality is a constant
problem that has been addressed by different approaches
through time, but it is not intended within the scope
of our work; our focus is on the latter issue. We
propose a generic solver that enables the user to focus
on modeling without getting involved in the complexity
required by the solution methods. We design an object
oriented framework for stochastic modeling with four
components namely, jMarkov which models Markov Chains,
jQBD which models Quasi-Birth-and-Death Processes,
jPhase which models Phase-Type Distributions and jMDP
which models Markov Decision Processes. We concentrate
all our effort on creating software that allows the
user to model any kind of system like a Markov Chain,
QBD or MDP with fairly basic knowledge of programming.
To this end we separate the modeling part from the
solution algorithms; therefore the user only needs to
mathematically model the problem and the software will
do the rest. However, we leave the package with the
possibility that experienced users can code their own
solution algorithms; this is done since the package
only contains the most common algorithms found in the
literature. The software does not use external plain
files like '.txt' or '.dat' written with specific
commands, but rather it is based on OOP (Object
Oriented Programming). Its main advantages include
implementation in a Java framework, which allows
the computational representation of the model to be
very similar to its mathematical representation such
that it would become natural to pass from one to
another. Also the program possesses the usual
characteristics of Java such as the use of inheritance
and abstraction. Finally, Java is a high level
computational language so the user doesn't need to be
concerned about technical problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casolari:2012:SRC,
author = "Sara Casolari and Michele Colajanni and Stefania
Tosi",
title = "Selective resource characterization for evaluation of
system dynamics",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "51--60",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185441",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Management decisions to achieve peak performance
operations, scalability and availability in distributed
systems require a continuous statistical
characterization of data sets coming from server and
network monitors. Due to the increasing sizes of data
centers and their continuous dynamic changes, the
traditional approaches that work on all data sets in a
centralized way are impractical. We propose a strategy
for data processing that is able to limit the analysis
of the large sets of collected measures to a smaller
subset of significant information for a twofold
purpose: to classify the collected data sets into a few
classes characterized by similar statistical behaviors,
and to evaluate the dynamics of the overall system and
its most relevant changes. The proposed strategy works at
the level of server resources and of significant
aggregation of servers of the overall distributed
system. Several experimental results demonstrate the
feasibility of the proposed strategy that is validated
in real contexts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aceto:2012:RUE,
author = "Giuseppe Aceto and Antonio Pescap{\`e}",
title = "On the recent use of email through traffic and network
analysis: the impact of {OSNs}, new trends, and other
communication platforms",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "61--70",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185442",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Since late 1971 --- when Ray Tomlinson invented
Internet-based email and sent the first message on
ARPANET --- email technology has evolved a lot, and
nowadays it is one of the most widely used applications
on the Internet. Despite this primacy, during the last
years other ways to exchange messages have been used by
Internet users (e.g. Instant Messaging, Social
Networks, microblogs, etc.). In this paper we propose a
methodology based on heterogeneous data sources to
analyze the amount of traffic associated with emails in
order to gain knowledge on the use of email by Internet
users in recent years. We consider real traffic
traces that are well known to the research community as
well as locally captured ones, and discuss them in the
light of other related phenomena: social network
adoption, online advertising trends, abusive email
spreads, etc. We discuss the trend of email traffic
over the last 10 years and we provide explanations
related to the impact of the use of other communication
platforms on email usage. This work represents a first
step towards a framework in which to analyze the trend
of the email traffic and the associated phenomena as
well as the understanding of the upcoming novel
communications behavior of Internet users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Distefano:2012:DAB,
author = "Salvatore Distefano and Antonio Puliafito and Kishor
S. Trivedi",
title = "Dynamic aspects and behaviors of complex systems in
performance and reliability assessment",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "71--78",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185443",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Reliability and performance evaluation are important,
often mandatory, steps in designing and analyzing
(critical) systems. In such cases, accurate models are
required to adequately take into account interference
or dependent behaviors affecting the system, its parts
and the external environment, especially if the system
experiences high complexity. The techniques and tools
to adopt in the evaluation have to adequately fit the
problem considered. The main goal of this paper is to
identify the dynamic-dependent aspects that can affect
the reliability and performance of a system. Starting
from the concept of dependence at the basis of system
decomposition, an analytic framework and some of the
most important dynamic-dependent aspects and behaviors
are characterized in terms of both dynamic reliability
and performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mahmud:2012:CST,
author = "Nidhal Mahmud and Martin Walker and Yiannis
Papadopoulos",
title = "Compositional synthesis of temporal fault trees from
state machines",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "79--88",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185444",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dependability analysis of a dynamic system which is
embedded with several complex interrelated components
raises two main problems. First, it is difficult to
represent in a single coherent and complete picture how
the system and its constituent parts behave in
conditions of failure. Second, the analysis can be
unmanageable due to a considerable number of failure
events, which increases with the number of components
involved. To remedy these problems, in this paper we
outline an analysis approach that converts failure
behavioural models (state machines) to temporal fault
trees (TFTs), which can then be analysed using Pandora
--- a recent technique for introducing temporal logic
to fault trees. The approach is compositional and
potentially more scalable, as it relies on the
synthesis of large system TFTs from smaller component
TFTs. We show, by using a Generic Triple Redundant
(GTR) system, how the approach enables a more accurate
and full analysis of an increasingly complex system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abundo:2012:ACP,
author = "Marco Abundo and Valeria Cardellini and Francesco {Lo
Presti}",
title = "Admission control policies for a multi-class
{QoS}-aware service oriented architecture",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "89--98",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185445",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the service computing paradigm, a service broker
can build new applications by composing
network-accessible services offered by loosely coupled
independent providers. In this paper, we address the
problem of providing a service broker, which offers to
prospective users a composite service with a range of
different Quality of Service (QoS) classes, with a
forward-looking admission control policy based on
Markov Decision Processes (MDP). This mechanism allows
the broker to decide whether to accept or reject a new
potential user in such a way to maximize its gain while
guaranteeing non-functional QoS requirements to its
already admitted users. We model the broker using a
continuous-time MDP and consider various techniques
suitable to solve both infinite-horizon and
finite-horizon MDPs. To assess the effectiveness of the
MDP-based admission control for the service broker, we
present simulation results where we compare the optimal
decisions obtained by the analytical solution of the
MDP with other admission control policies. To deal with
large problem instances, we also propose a heuristic
policy for the MDP solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
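
The admission-control computation sketched below is only a toy
analogue of what the abstract describes: discounted value iteration
on a small uniformized birth-death state space (number of admitted
users), with accept/reject decisions at arrivals. The capacity,
rates, rewards and discount factor are invented for illustration
and do not reproduce the broker model of the paper.

# Illustrative sketch only: discounted value iteration for a toy accept/reject
# admission-control MDP after uniformization of a birth-death chain.
import numpy as np

C = 5                        # toy capacity (maximum admitted users)
lam, mu = 2.0, 1.0           # arrival and per-user completion rates
gamma = 0.95                 # discount factor per uniformized step
reward_accept = 1.0          # reward collected when a user is admitted

Lambda = lam + C * mu        # uniformization constant

def step_probs(n, accept):
    """Uniformized one-step transition probabilities from state n under an action."""
    p = np.zeros(C + 1)
    up = lam / Lambda if (accept and n < C) else 0.0
    down = n * mu / Lambda
    p[min(n + 1, C)] += up
    p[max(n - 1, 0)] += down
    p[n] += 1.0 - up - down
    return p

V = np.zeros(C + 1)
for _ in range(500):
    V = np.array([max(gamma * step_probs(n, False).dot(V),
                      (reward_accept if n < C else 0.0)
                      + gamma * step_probs(n, True).dot(V))
                  for n in range(C + 1)])
policy = ["accept"
          if (reward_accept if n < C else 0.0) + gamma * step_probs(n, True).dot(V)
             >= gamma * step_probs(n, False).dot(V)
          else "reject"
          for n in range(C + 1)]
print(V, policy)
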
@Article{Persona:2012:HQM,
author = "Vittoria de Nitto Person{\`a}",
title = "Heuristics for {QoS} maintenance: adaptive policies in
differentiated services wireless networks",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "99--107",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185446",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The fluctuation in resource availability calls for
adaptive behavior in wireless environments. The problem
is exacerbated by the differentiated services with
different quality demands. We present the MATS+ scheme,
an adaptive bandwidth allocation and admission control
algorithm for mobile integrated services networks. This
extends the recently proposed MATS scheme [11] to
include non-real-time classes and a per-class
utilization control. We define an analytical model and
performance metrics to evaluate the proposed scheme.
The efficiency and flexibility of the analytical model
allows conducting several experiments in a real-world
scenario by changing different system parameters. From
the obtained results we define an interesting
heuristic to initialize the scheme guaranteeing QoS
requirements and to maintain the QoS while adapting to
changing environment conditions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anceaume:2012:PEL,
author = "Emmanuelle Anceaume and Romaric Ludinard and Bruno
Sericola",
title = "Performance evaluation of large-scale dynamic
systems",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "108--117",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185447",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an in-depth study of the
dynamicity and robustness properties of large-scale
distributed systems, and in particular of peer-to-peer
systems. When designing such systems, two major issues
need to be faced. First, the population of these
systems evolves continuously (nodes can join and leave the
system as often as they wish without any central
authority in charge of their control), and second,
these systems being open, one needs to defend against
the presence of malicious nodes that try to subvert the
system. Given robust operations and adversarial
strategies, we propose an analytical model of the local
behavior of clusters, based on Markov chains. This
local model provides an evaluation of the impact of
malicious behaviors on the correctness of the system.
Moreover, this local model is used to evaluate
analytically the performance of the global system,
allowing us to characterize the global behavior of the
system with respect to its dynamics and to the presence
of malicious nodes and then to validate our approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Patel:2012:PIF,
author = "Naresh M. Patel",
title = "Performance implications of flash and storage class
memories",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "1--2",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254758",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The storage industry has seen incredible growth in
data storage needs by both consumers and enterprises.
Long-term technology trends mean that the data deluge
will continue well into the future. These trends
include the big-data trend (driven by data mining
analytics, high-bandwidth needs, and large content
repositories), server virtualization, cloud storage,
and Flash. We will cover how Flash and storage class
memories (SCM) interact with some of these major trends
from a performance perspective.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2012:HPC,
author = "Zhen Liu",
title = "High-performance computing in mobile services",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "3--4",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254759",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the ever increasing popularity of smart phones,
mobile services have been evolving rapidly to allow
users to enjoy localized and personalized experiences.
Users can discover local information and keep connected
with family and friends on the go, and ultimately to
experience the convergence of cyber space and physical
world where digital technologies are interwoven into
the day-to-day life. A pivotal component of such a
cyber-physical convergence is the contextual
intelligence. The extraction and dissemination of
contextual information around users is the key for the
cyber capabilities to be applied to physical activities
and for the cyber world to better reflect the physical
reality. In this talk, we shall address some issues
arising from context-based mobile services. In
particular, we discuss how mobility impacts contextual
relevancy and personalization in mobile services. The
relevancy and timeliness of contextual information not
only are essential for these services to deliver great
user experiences, but also put significant computation
pressure on service infrastructure that processes
continuous data streams in real time and disseminates
relevant data to a large number of mobile users. This
talk will explore the challenges and opportunities for
high-performance computing in mobile services. Based on
key findings from large-scale mobile measurement data,
the talk will analyze the tradeoff of different
computing architectures, present case studies of
scalable system design and implementation for
personalized mobile services, and conclude with open
challenges for the broad research community in
performance measurement and modeling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2012:DTM,
author = "Jian Tan and Xiaoqiao Meng and Li Zhang",
title = "Delay tails in {MapReduce} scheduling",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "5--16",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254761",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "MapReduce/Hadoop production clusters exhibit
heavy-tailed characteristics for job processing times.
These phenomena result from the workload features
and the adopted scheduling algorithms. Analytically
understanding the delays under different schedulers for
MapReduce can facilitate the design and deployment of
large Hadoop clusters. The map and reduce tasks of a
MapReduce job have a fundamental difference and a tight
dependence between them, complicating the analysis.
This also leads to an interesting starvation problem
with the widely used Fair Scheduler due to its greedy
approach to launching reduce tasks. To address this
issue, we design and implement Coupling Scheduler,
which gradually launches reduce tasks depending on map
task progresses. Real experiments demonstrate
improvements to job response times by up to an order of
magnitude. Based on extensive measurements and source
code investigations, we propose analytical models for
the default FIFO and Fair Scheduler as well as our
implemented Coupling Scheduler. For a class of
heavy-tailed map service time distributions, i.e.,
regularly varying of index $ - \alpha $, we derive the
distribution tail of the job processing delay under the
three schedulers, respectively. The default FIFO
Scheduler causes the delay to be regularly varying of
index $ - \alpha + 1$. Interestingly, we discover a criticality
phenomenon for Fair Scheduler, the delay under which
can change from regularly varying of index $ - \alpha $ to $ - \alpha + 1$,
depending on the maximum number of reduce tasks of a
job. Other more subtle behaviors also exist. In
contrast, the delay distribution tail under Coupling
Scheduler can be one order lower than Fair Scheduler
under some conditions, implying a better performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:2012:OQS,
author = "Devavrat Shah and Neil Walton and Yuan Zhong",
title = "Optimal queue-size scaling in switched networks",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "17--28",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254762",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a switched (queueing) network in which
there are constraints on which queues may be served
simultaneously; such networks have been used to
effectively model input-queued switches and wireless
networks. The scheduling policy for such a network
specifies which queues to serve at any point in time,
based on the current state or past history of the
system. In the main result of this paper, we provide a
new class of online scheduling policies that achieve
optimal average queue-size scaling for a class of
switched networks including input-queued switches. In
particular, this result establishes the validity of a conjecture
about optimal queue-size scaling for input-queued
switches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hyytia:2012:MSH,
author = "Esa Hyyti{\"a} and Samuli Aalto and Aleksi Penttinen",
title = "Minimizing slowdown in heterogeneous size-aware
dispatching systems",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "29--40",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254763",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a system of parallel queues where tasks
are assigned (dispatched) to one of the available
servers upon arrival. The dispatching decision is based
on the full state information, i.e., on the sizes of
the new and existing jobs. We are interested in
minimizing the so-called mean slowdown criterion
corresponding to the mean of the sojourn time divided
by the processing time. Assuming no new jobs arrive,
the shortest-processing-time-product (SPTP) schedule is
known to minimize the slowdown of the existing jobs.
The main contribution of this paper is three-fold: (1)
To show the optimality of SPTP with respect to slowdown
in a single server queue under Poisson arrivals; (2) to
derive the so-called size-aware value functions for
M/G/1-FIFO/LIFO/SPTP with general holding costs of
which the slowdown criterion is a special case; and (3)
to utilize the value functions to derive efficient
dispatching policies so as to minimize the mean
slowdown in a heterogeneous server system. The derived
policies offer significantly better performance than,
e.g., the size-aware-task-assignment with equal load
(SITA-E) and least-work-left (LWL) policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Leconte:2012:BGS,
author = "Mathieu Leconte and Marc Lelarge and Laurent
Massouli{\'e}",
title = "Bipartite graph structures for efficient balancing of
heterogeneous loads",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "41--52",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254764",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers large scale distributed content
service platforms, such as peer-to-peer video-on-demand
systems. Such systems feature two basic resources,
namely storage and bandwidth. Their efficiency
critically depends on two factors: (i) content
replication within servers, and (ii) how incoming
service requests are matched to servers holding
requested content. To inform the corresponding design
choices, we make the following contributions. We first
show that, for underloaded systems, so-called
proportional content placement with a simple greedy
strategy for matching requests to servers ensures full
system efficiency provided storage size grows
logarithmically with the system size. However, for
constant storage size, this strategy undergoes a phase
transition with severe loss of efficiency as system
load approaches criticality. To better understand the
role of the matching strategy in this performance
degradation, we characterize the asymptotic system
efficiency under an optimal matching policy. Our
analysis shows that --- in contrast to greedy matching ---
optimal matching incurs an inefficiency that is
exponentially small in the server storage size, even at
critical system loads. It further allows a
characterization of content replication policies that
minimize the inefficiency. These optimal policies,
which differ markedly from proportional placement, have
a simple structure which makes them implementable in
practice. On the methodological side, our analysis of
matching performance uses the theory of local weak
limits of random graphs, and highlights a novel
characterization of matching numbers in bipartite
graphs, which may both be of independent interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Atikoglu:2012:WAL,
author = "Berk Atikoglu and Yuehai Xu and Eitan Frachtenberg and
Song Jiang and Mike Paleczny",
title = "Workload analysis of a large-scale key-value store",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "53--64",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254766",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Key-value stores are a vital component in many
scale-out enterprises, including social networks,
online retail, and risk analysis. Accordingly, they are
receiving increased attention from the research
community in an effort to improve their performance,
scalability, reliability, cost, and power consumption.
To be effective, such efforts require a detailed
understanding of realistic key-value workloads. And yet
little is known about these workloads outside of the
companies that operate them. This paper aims to address
this gap. To this end, we have collected detailed
traces from Facebook's Memcached deployment, arguably
the world's largest. The traces capture over 284
billion requests from five different Memcached use
cases over several days. We analyze the workloads from
multiple angles, including: request composition, size,
and rate; cache efficacy; temporal patterns; and
application use cases. We also propose a simple model
of the most representative trace to enable the
generation of more realistic synthetic workloads by the
community. Our analysis details many characteristics of
the caching workload. It also reveals a number of
surprises: a GET/SET ratio of 30:1 that is higher than
assumed in the literature; some applications of
Memcached behave more like persistent storage than a
cache; strong locality metrics, such as keys accessed
many millions of times a day, do not always suffice for
a high hit rate; and there is still room for efficiency
and hit rate improvements in Memcached's
implementation. Toward the last point, we make several
suggestions that address the exposed deficiencies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shafiq:2012:FLC,
author = "Muhammad Zubair Shafiq and Lusheng Ji and Alex X. Liu
and Jeffrey Pang and Jia Wang",
title = "A first look at cellular machine-to-machine traffic:
large scale measurement and characterization",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "65--76",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254767",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cellular network based Machine-to-Machine (M2M)
communication is fast becoming a market-changing force
for a wide spectrum of businesses and applications such
as telematics, smart metering, point-of-sale terminals,
and home security and automation systems. In this
paper, we aim to answer the following important
question: Does traffic generated by M2M devices impose
new requirements and challenges for cellular network
design and management? To answer this question, we take
a first look at the characteristics of M2M traffic and
compare it with traditional smartphone traffic. We have
conducted our measurement analysis using a week-long
traffic trace collected from a tier-1 cellular network
in the United States. We characterize M2M traffic from
a wide range of perspectives, including temporal
dynamics, device mobility, application usage, and
network performance. Our experimental results show that
M2M traffic exhibits significantly different patterns
than smartphone traffic in multiple aspects. For
instance, M2M devices have a much larger ratio of
uplink to downlink traffic volume, their traffic
typically exhibits different diurnal patterns, they are
more likely to generate synchronized traffic resulting
in bursty aggregate traffic volumes, and are less
mobile compared to smartphones. On the other hand, we
also find that M2M devices are generally competing with
smartphones for network resources in co-located
geographical regions. These and other findings suggest
that better protocol design, more careful spectrum
allocation, and modified pricing schemes may be needed
to accommodate the rise of M2M devices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Han:2012:BPB,
author = "Jinyoung Han and Seungbae Kim and Taejoong Chung and
Ted Taekyoung Kwon and Hyun-chul Kim and Yanghee Choi",
title = "Bundling practice in {BitTorrent}: what, how, and
why",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "77--88",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254768",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We conduct comprehensive measurements on the current
practice of content bundling to understand the
structural patterns of torrents and the participant
behaviors of swarms on one of the largest BitTorrent
portals: The Pirate Bay. From datasets of 120K
torrents and 14.8M peers, we investigate what
constitutes torrents and how users participate in
swarms from the perspective of bundling, across
different content categories: Movie, TV, Porn, Music,
Application, Game and E-book. In particular, we focus
on: (1) how prevalent content bundling is, (2) how and
what files are bundled into torrents, (3) what
motivates publishers to bundle files, and (4) how peers
access the bundled files. We find that over 72\% of
BitTorrent torrents contain multiple files, which
indicates that bundling is widely used for file
sharing. We reveal that profit-driven BitTorrent
publishers who promote their own web sites for
financial gains such as advertising tend to prefer
bundling. We also observe that most files (94\%) in
a bundle torrent are selected by users and the bundle
torrents are more popular than the single (or
non-bundle) ones on average. Overall, there are notable
differences in the structural patterns of torrents and
swarm characteristics (i) across different content
categories and (ii) between single and bundle
torrents.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gan:2012:EEC,
author = "Lingwen Gan and Anwar Walid and Steven Low",
title = "Energy-efficient congestion control",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "89--100",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254770",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Various link bandwidth adjustment mechanisms are being
developed to save network energy. However, their
interaction with congestion control can significantly
reduce network throughput, and is not well understood.
We first put forward a framework to study this
interaction, and then propose an easily implementable
dynamic bandwidth adjustment (DBA) mechanism for the
links. In DBA, each link updates its bandwidth
according to an integral control law to match its
average buffer size with a target buffer size. We prove
that DBA reduces link bandwidth without sacrificing
throughput---DBA only turns off excess bandwidth---in
the presence of congestion control. Preliminary ns2
simulations confirm this result.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jelenkovic:2012:UAD,
author = "Predrag R. Jelenkovic and Evangelia D. Skiani",
title = "Uniform approximation of the distribution for the
number of retransmissions of bounded documents",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "101--112",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254771",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Retransmission-based failure recovery represents a
primary approach in existing communication networks, on
all protocol layers, that guarantees data delivery in
the presence of channel failures. Contrary to the
traditional belief that the number of retransmissions
is geometrically distributed, a new phenomenon was
discovered recently, which shows that retransmissions
can cause long(-tailed) delays and instabilities even
if all traffic and network characteristics are
light-tailed, e.g., exponential or Gaussian. Since the
preceding finding holds under the assumption that data
sizes have infinite support, in this paper we
investigate the practically important case of bounded
data units $ 0 \leq L_b \leq b$. To this end, we
provide an explicit and uniform characterization of the
entire body of the retransmission distribution
$ \Pr [N_b > n]$ in both $n$ and $b$. This rigorous approximation
clearly demonstrates the previously observed transition
from power law distributions in the main body to
exponential tails. The accuracy of our approximation is
validated with a number of simulation experiments.
Furthermore, the results highlight the importance of
wisely determining the size of data units in order to
accommodate the performance needs in
retransmission-based systems. From a broader
perspective, this study applies to any other system,
e.g., computing, where restart mechanisms are employed
after a job processing failure.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{VanHoudt:2012:FLA,
author = "Benny {Van Houdt} and Luca Bortolussi",
title = "Fluid limit of an asynchronous optical packet switch
with shared per link full range wavelength conversion",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "113--124",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254772",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an asynchronous all optical packet switch
(OPS) where each link consists of $N$ wavelength
channels and a pool of $ C \leq N$ full range tunable
wavelength converters. Under the assumption of Poisson
arrivals with rate $ \lambda $ (per wavelength channel) and
exponential packet lengths, we determine a simple
closed-form expression for the limit of the loss
probabilities $ P_{\rm loss}(N)$ as $N$ tends to
infinity (while the load and conversion ratio $ \sigma
= C / N$ remains fixed). More specifically, for $
\sigma \leq \lambda^2$ the loss probability tends to $
(\lambda^2 - \sigma) / (\lambda (1 + \lambda))$, while
for $ \sigma > \lambda^2$ the loss tends to zero. We
also prove an insensitivity result when the exponential
packet lengths are replaced by certain classes of
phase-type distributions. A key feature of the
dynamical system (i.e., set of ODEs) that describes the
limit behavior of this OPS switch, is that its
right-hand side is discontinuous. To prove the
convergence, we therefore had to generalize some
existing results to the setting of piecewise-smooth
dynamical systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hua:2012:TOE,
author = "Nan Hua and Ashwin Lall and Baochun Li and Jun Xu",
title = "Towards optimal error-estimating codes through the
lens of {Fisher} information analysis",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "125--136",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254773",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Error estimating coding (EEC) has recently been
established as an important tool to estimate bit error
rates in the transmission of packets over wireless
links, with a number of potential applications in
wireless networks. In this paper, we present an
in-depth study of error estimating codes through the
lens of Fisher information analysis and find that the
original EEC estimator fails to exploit the information
contained in its code to the fullest extent. Motivated
by this discovery, we design a new estimator for the
original EEC algorithm, which significantly improves
the estimation accuracy, and is empirically very close
to the Cram{\'e}r--Rao bound. Following this path, we
generalize the EEC algorithm to a new family of
algorithms called gEEC (generalized EEC). These
algorithms can be tuned to hold 25--35\% more
information with the same overhead, and hence deliver
even better estimation accuracy---close to optimal, as
evidenced by the Cram{\'e}r--Rao bound. Our theoretical
analysis and assertions are supported by extensive
experimental evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vulimiri:2012:HWC,
author = "Ashish Vulimiri and Gul A. Agha and Philip Brighten
Godfrey and Karthik Lakshminarayanan",
title = "How well can congestion pricing neutralize denial of
service attacks?",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "137--150",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254775",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Denial of service protection mechanisms usually
require classifying malicious traffic, which can be
difficult. Another approach is to price scarce
resources. However, while congestion pricing has been
suggested as a way to combat DoS attacks, it has not
been shown quantitatively how much damage a malicious
player could cause to the utility of benign
participants. In this paper, we quantify the protection
that congestion pricing affords against DoS attacks,
even for powerful attackers that can control their
packets' routes. Specifically, we model the limits on
the resources available to the attackers in three
different ways and, in each case, quantify the maximum
amount of damage they can cause as a function of their
resource bounds. In addition, we show that congestion
pricing is provably superior to fair queueing in attack
resilience.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Niu:2012:PCB,
author = "Di Niu and Chen Feng and Baochun Li",
title = "Pricing cloud bandwidth reservations under demand
uncertainty",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "151--162",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254776",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a public cloud, bandwidth is traditionally priced
in a pay-as-you-go model. Reflecting the recent trend
of augmenting cloud computing with bandwidth
guarantees, we consider a novel model of cloud
bandwidth allocation and pricing when explicit
bandwidth reservation is enabled. We argue that a
tenant's utility depends not only on its bandwidth
usage, but more importantly on the portion of its
demand that is satisfied with a performance guarantee.
Our objective is to determine the optimal policy for
pricing cloud bandwidth reservations, in order to
maximize social welfare, i.e., the sum of the expected
profits that can be made by all tenants and the cloud
provider, even in the presence of demand uncertainty.
The problem turns out to be a large-scale network
optimization problem with a coupled objective function.
We propose two new distributed solutions --- based on
chaotic equation updates and cutting-plane methods ---
that prove to be more efficient than existing solutions
based on consistency pricing and subgradient methods.
In addition, we address the practical challenge of
forecasting demand statistics, required by our
optimization problem as input. We propose a factor
model for near-future demand prediction, and test it on
a real-world video workload dataset. All included, we
have designed a fully computerized trading environment
for cloud bandwidth reservations, which operates
effectively at a fine granularity of as small as ten
minutes in our trace-driven simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{El-Sayed:2012:TMD,
author = "Nosayba El-Sayed and Ioan A. Stefanovici and George
Amvrosiadis and Andy A. Hwang and Bianca Schroeder",
title = "Temperature management in data centers: why some
(might) like it hot",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "163--174",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254778",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The energy consumed by data centers is starting to
make up a significant fraction of the world's energy
consumption and carbon emissions. A large fraction of
the consumed energy is spent on data center cooling,
which has motivated a large body of work on temperature
management in data centers. Interestingly, a key aspect
of temperature management has not been well understood:
controlling the setpoint temperature at which to run a
data center's cooling system. Most data centers set
their thermostat based on (conservative) suggestions by
manufacturers, as there is limited understanding of how
higher temperatures will affect the system. At the same
time, studies suggest that increasing the temperature
setpoint by just one degree could save 2--5\% of the
energy consumption. This paper provides a multi-faceted
study of temperature management in data centers. We use
a large collection of field data from different
production environments to study the impact of
temperature on hardware reliability, including the
reliability of the storage subsystem, the memory
subsystem and server reliability as a whole. We also
use an experimental testbed based on a thermal chamber
and a large array of benchmarks to study two other
potential issues with higher data center temperatures:
the effect on server performance and power. Based on
our findings, we make recommendations for temperature
management in data centers that create the potential
for saving energy while limiting negative effects on
system reliability and performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2012:RCA,
author = "Zhenhua Liu and Yuan Chen and Cullen Bash and Adam
Wierman and Daniel Gmach and Zhikui Wang and Manish
Marwah and Chris Hyser",
title = "Renewable and cooling aware workload management for
sustainable data centers",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "175--186",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254779",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, the demand for data center computing has
surged, increasing the total energy footprint of data
centers worldwide. Data centers typically comprise
three subsystems: IT equipment provides services to
customers; power infrastructure supports the IT and
cooling equipment; and the cooling infrastructure
removes heat generated by these subsystems. This work
presents a novel approach to model the energy flows in
a data center and optimize its operation.
Traditionally, supply-side constraints such as energy
or cooling availability were treated independently from
IT workload management. This work reduces electricity
cost and environmental impact using a holistic approach
that integrates renewable supply, dynamic pricing, and
cooling supply including chiller and outside air
cooling, with IT workload planning to improve the
overall sustainability of data center operations.
Specifically, we first predict renewable energy as well
as IT demand. Then we use these predictions to generate
an IT workload management plan that schedules IT
workload and allocates IT resources within a data
center according to time-varying power supply and
cooling efficiency. We have implemented and evaluated
our approach using traces from real data centers and
production systems. The results demonstrate that our
approach can reduce both the recurring power costs and
the use of non-renewable energy by as much as 60\%
compared to existing techniques, while still meeting
the Service Level Agreements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2012:ESD,
author = "Di Wang and Chuangang Ren and Anand Sivasubramaniam
and Bhuvan Urgaonkar and Hosam Fathy",
title = "Energy storage in datacenters: what, where, and how
much?",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "187--198",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254780",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Energy storage --- in the form of UPS units --- in a
datacenter has primarily been used to fail over to
diesel generators upon power outages. There has been
recent interest in using these Energy Storage Devices
(ESDs) for demand-response (DR) to either shift peak
demand away from high tariff periods, or to shave
demand allowing aggressive under-provisioning of the
power infrastructure. All such prior work has only
considered a single/specific type of ESD (typically
re-chargeable lead-acid batteries), and has only
employed them at a single level of the power delivery
network. Continuing technological advances have
provided us with a plethora of competitive ESD options
ranging from ultra-capacitors, to different kinds of
batteries, flywheels and even compressed air-based
storage. These ESDs offer very different trade-offs
between their power and energy costs, densities,
lifetimes, and energy efficiency, among other factors,
suggesting that employing hybrid combinations of these
may allow more effective DR than with a single
technology. Furthermore, ESDs can be placed at
different, and possibly multiple, levels of the power
delivery hierarchy with different associated
trade-offs. To our knowledge, no prior work has studied
the extensive design space involving multiple ESD
technology provisioning and placement options. This
paper intends to fill this critical void, by presenting
a theoretical framework for capturing important
characteristics of different ESD technologies, the
trade-offs of placing them at different levels of the
power hierarchy, and quantifying the resulting
cost-benefit trade-offs as a function of workload
properties.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:2012:RCU,
author = "Devavrat Shah and Tauhid Zaman",
title = "Rumor centrality: a universal source detector",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "199--210",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254782",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of detecting the source of a
rumor (information diffusion) in a network based on
observations about which set of nodes possess the
rumor. In a recent work [10], this question was
introduced and studied. The authors proposed rumor
centrality as an estimator for detecting the source.
They established it to be the maximum likelihood
estimator with respect to the popular Susceptible
Infected (SI) model with exponential spreading time for
regular trees. They showed that as the size of infected
graph increases, for a line ($2$-regular tree) graph,
the probability of source detection goes to $0$ while
for $d$-regular trees with $ d \geq 3$ the probability
of detection, say $ \alpha_d $, remains bounded away
from $0$ and is less than $ 1 / 2$. Their results,
however, stop short of providing insights for the
heterogeneous setting such as irregular trees or the SI
model with non-exponential spreading times. This paper
overcomes this limitation and establishes the
effectiveness of rumor centrality for source detection
for generic random trees and the SI model with a
generic spreading time distribution. The key result is
an interesting connection between a multi-type
continuous time branching process (an equivalent
representation of a generalized P{\'o}lya's urn, cf. [1])
and the effectiveness of rumor centrality. Through
this, it is possible to quantify the detection
probability precisely. As a consequence, we recover all
the results of [10] as a special case and more
importantly, we obtain a variety of results
establishing the universality of rumor centrality in
the context of tree-like graphs and the SI model with a
generic spreading time distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Netrapalli:2012:LGE,
author = "Praneeth Netrapalli and Sujay Sanghavi",
title = "Learning the graph of epidemic cascades",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "211--222",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254783",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of finding the graph on which
an epidemic spreads, given only the times when each
node gets infected. While this is a problem of central
importance in several contexts --- offline and online
social networks, e-commerce, epidemiology --- there has
been very little work, analytical or empirical, on
finding the graph. Clearly, it is impossible to do so
from just one epidemic; our interest is in learning the
graph from a small number of independent epidemics. For
the classic and popular ``independent cascade''
epidemics, we analytically establish sufficient
conditions on the number of epidemics for both the
global maximum-likelihood (ML) estimator, and a natural
greedy algorithm to succeed with high probability. Both
results are based on a key observation: the global
graph learning problem decouples into $n$ local
problems, one for each node. For a node of degree $d$,
we show that its neighborhood can be reliably found
once it has been infected $ O(d^2 \log n)$ times (for
ML on general graphs) or $ O(d \log n)$ times (for
greedy on trees). We also provide a corresponding
information-theoretic lower bound of $ \Omega (d \log
n)$; thus our bounds are essentially tight.
Furthermore, if we are given side-information in the
form of a super-graph of the actual graph (as is often
the case), then the number of epidemic samples required
--- in all cases --- becomes independent of the network
size $n$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Milling:2012:NFR,
author = "Chris Milling and Constantine Caramanis and Shie
Mannor and Sanjay Shakkottai",
title = "Network forensics: random infection vs spreading
epidemic",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "223--234",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254784",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer (and human) networks have long had to contend
with spreading viruses. Effectively controlling or
curbing an outbreak requires understanding the dynamics
of the spread. A virus that spreads by taking advantage
of physical links or user-acquaintance links on a
social network can grow explosively if it spreads
beyond a critical radius. On the other hand, random
infections (that do not take advantage of network
structure) have very different propagation
characteristics. If too many machines (or humans) are
infected, network structure becomes essentially
irrelevant, and the different spreading modes appear
identical. When can we distinguish between the mechanics of
infection? Further, how can this be done efficiently?
This paper studies these two questions. We provide
sufficient conditions for different graph topologies,
for when it is possible to distinguish between a random
model of infection and a spreading epidemic model, with
probability of misclassification going to zero. We
further provide efficient algorithms that are
guaranteed to work in different regimes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2012:WGB,
author = "Hyojun Kim and Moonkyung Ryu and Umakishore
Ramachandran",
title = "What is a good buffer cache replacement scheme for
mobile flash storage?",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "235--246",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254786",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Smartphones are becoming ubiquitous and powerful. The
Achilles' heel in such devices that limits performance
is the storage. Low-end flash memory is the storage
technology of choice in such devices due to energy,
size, and cost considerations. In this paper, we take a
critical look at the performance of flash on
smartphones for mobile applications. Specifically, we
ask the question whether the state-of-the-art buffer
cache replacement schemes proposed thus far (both
flash-agnostic and flash-aware ones) are the right ones
for mobile flash storage. To answer this question, we
first expose the limitations of current buffer cache
performance evaluation methods, and propose a novel
evaluation framework that is a hybrid between
trace-driven simulation and real implementation of such
schemes inside an operating system. Such an evaluation
reveals some unexpected and surprising insights on the
performance of buffer management schemes that
contradict conventional wisdom. Armed with this
knowledge, we propose a new buffer cache replacement
scheme called SpatialClock. Using our evaluation
framework, we show the superior performance of
SpatialClock relative to the state-of-the-art for
mobile flash storage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alizadeh:2012:VRL,
author = "Mohammad Alizadeh and Adel Javanmard and Shang-Tse
Chuang and Sundar Iyer and Yi Lu",
title = "Versatile refresh: low complexity refresh scheduling
for high-throughput multi-banked {eDRAM}",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "247--258",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254787",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multi-banked embedded DRAM (eDRAM) has become
increasingly popular in high-performance systems.
However, the data retention problem of eDRAM is
exacerbated by the larger number of banks and the
high-performance environment in which it is deployed:
The data retention time of each memory cell decreases
while the number of cells to be refreshed increases.
To address this, multi-bank designs offer a concurrent refresh
mode, where idle banks can be refreshed concurrently
during read and write operations. However, conventional
techniques such as periodically scheduling
refreshes---with priority given to refreshes in case of
conflicts with reads or writes---have variable
performance, increase read latency, and can perform
poorly in worst case memory access patterns. We propose
a novel refresh scheduling algorithm that is
low-complexity, produces near-optimal throughput with
universal guarantees, and is tolerant to bursty memory
access patterns. The central idea is to decouple the
scheduler into two simple-to-implement modules: one
determines which cell to refresh next and the other
determines when to force an idle cycle in all banks. We
derive necessary and sufficient conditions to guarantee
data integrity for all access patterns, with any given
number of banks, rows per bank, read/write ports and
data retention time. Our analysis shows that there is a
tradeoff between refresh overhead and burst tolerance
and characterizes this tradeoff precisely. The
algorithm is shown to be near-optimal and achieves, for
instance, 76.6\% reduction in worst-case refresh
overhead from the periodic refresh algorithm for a
250 MHz eDRAM with a 10 $ \mu $s retention time and 16 banks each
with 128 rows. Simulations with Apex-Map synthetic
benchmarks and switch lookup table traffic show that VR
can almost completely hide the refresh overhead for
memory accesses with moderate-to-high multiplexing
across memory banks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhattacharya:2012:DLI,
author = "Suparna Bhattacharya and Karthick Rajamani and K.
Gopinath and Manish Gupta",
title = "Does lean imply green?: a study of the power
performance implications of {Java} runtime bloat",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "259--270",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254789",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The presence of software bloat in large flexible
software systems can hurt energy efficiency. However,
identifying and mitigating bloat is fairly effort
intensive. To enable such efforts to be directed where
there is a substantial potential for energy savings, we
investigate the impact of bloat on power consumption
under different situations. We conduct the first
systematic experimental study of the joint
power-performance implications of bloat across a range
of hardware and software configurations on modern
server platforms. The study employs controlled
experiments to expose different effects of a common
type of Java runtime bloat, excess temporary objects,
in the context of the SPECPower\_ssj2008 workload. We
introduce the notion of equi-performance power
reduction to characterize the impact, in addition to
peak power comparisons. The results show a wide
variation in energy savings from bloat reduction across
these configurations. Energy efficiency benefits at
peak performance tend to be most pronounced when bloat
affects a performance bottleneck and non-bloated
resources have low energy proportionality.
Equi-performance power savings are highest when bloated
resources have a high degree of energy proportionality.
We develop an analytical model that establishes a
general relation between resource pressure caused by
bloat and its energy efficiency impact under different
conditions of resource bottlenecks and energy
proportionality. Applying the model to different
``what-if'' scenarios, we predict the impact of bloat
reduction and corroborate these predictions with
empirical observations. Our work shows that the
prevalent software-only view of bloat is inadequate for
assessing its power-performance impact and instead
provides a full systems approach for reasoning about
its implications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lim:2012:DFQ,
author = "Seung-Hwan Lim and Jae-Seok Huh and Youngjae Kim and
Galen M. Shipman and Chita R. Das",
title = "{D}-factor: a quantitative model of application
slow-down in multi-resource shared systems",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "271--282",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254790",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling multiple jobs onto a platform enhances
system utilization by sharing resources. The benefits
from higher resource utilization include reduced cost
to construct, operate, and maintain a system, which
often include energy consumption. Maximizing these
benefits, while satisfying performance limits, comes at
a price --- resource contention among jobs increases
job completion time. In this paper, we analyze
slow-downs of jobs due to contention for multiple
resources in a system, referred to as the dilation factor.
We observe that multiple-resource contention creates
non-linear dilation factors of jobs. From this
observation, we establish a general quantitative model
for dilation factors of jobs in multi-resource systems.
A job is characterized by vector-valued loading
statistics, and the dilation factors of a job set are given
by a quadratic function of their loading vectors. We
demonstrate how to systematically characterize a job,
maintain the data structure to calculate the dilation
factor (loading matrix), and calculate the dilation
factor of each job. We validated the accuracy of the
model with multiple processes running on a native Linux
server, virtualized servers, and with multiple
MapReduce workloads co-scheduled in a cluster.
Evaluation with measured data shows that the D-factor
model has an error margin of less than 16\%. We also
show that the model can be integrated with an existing
on-line scheduler to minimize the makespan of
workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yoo:2012:AAD,
author = "Wucherl Yoo and Kevin Larson and Lee Baugh and
Sangkyum Kim and Roy H. Campbell",
title = "{ADP}: automated diagnosis of performance pathologies
using hardware events",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "283--294",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254791",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance characterization of applications' hardware
behavior is essential for making the best use of
available hardware resources. Modern architectures
offer access to many hardware events that are capable
of providing information to reveal architectural
performance bottlenecks throughout the core and memory
hierarchy. These events can provide programmers with
unique and powerful insights into the causes of the
resource bottlenecks in their applications. However,
interpreting these events has been a significant
challenge. We present an automated system that uses
machine learning to identify an application's
performance problems. Our system provides programmers
with insights about the performance of their
applications while shielding them from the onerous task
of digesting hardware events. It uses a decision-tree-based
algorithm, random forests, on our micro-benchmarks to
fingerprint the performance problems. Our system
divides a profiled application into functions and
automatically classifies each function by the dominant
hardware resource bottlenecks. Using the
classifications from the hotspot functions, we were
able to achieve an average speedup of 1.73 on three
applications in the PARSEC benchmark suite. Our system
provides programmers with a guideline of where, what,
and how to fix the detected performance problems in
applications, which would have otherwise required
considerable architectural knowledge.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2012:PFS,
author = "Di Xu and Chenggang Wu and Pen-Chung Yew and Jianjun
Li and Zhenjiang Wang",
title = "Providing fairness on shared-memory multiprocessors
via process scheduling",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "295--306",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254792",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Competition for shared memory resources on
multiprocessors is the dominant cause of application
slowdown and makes application performance vary
unpredictably. It exacerbates the need for Quality of
Service (QoS) on such systems. In this paper, we
propose a fair-progress process scheduling (FPS) policy
to improve system fairness. Its strategy is to force
the equally-weighted applications to have the same
amount of slowdown when they run concurrently. The
basic approach is to monitor the progress of all
applications at runtime. When we find that an application
has suffered more slowdown and accumulated less effective
work than others, we allocate it more CPU time to restore
parity. Our policy also allows different
weights to different threads, and provides an effective
and robust tuner that allows the OS to freely make
tradeoffs between system fairness and higher
throughput. Evaluation results show that FPS can
significantly improve system fairness by an average of
53.5\% and 65.0\% on a 4-core processor with a private
cache and a 4-core processor with a shared cache,
respectively. The penalty is about 1.1\% and 1.6\% of
the system throughput. For memory-intensive workloads,
FPS also improves system fairness by an average of
45.2\% and 21.1\% on 4-core and 8-core systems,
respectively, at the expense of a throughput loss of
about 2\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Figueiredo:2012:CCT,
author = "Daniel Figueiredo and Philippe Nain and Bruno Ribeiro
and Edmundo {de Souza e Silva} and Don Towsley",
title = "Characterizing continuous time random walks on time
varying graphs",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "307--318",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254794",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study the behavior of a continuous
time random walk (CTRW) on a stationary and ergodic
time varying dynamic graph. We establish conditions
under which the CTRW is a stationary and ergodic
process. In general, the stationary distribution of the
walker depends on the walker rate and is difficult to
characterize. However, we characterize the stationary
distribution in the following cases: (i) the walker
rate is significantly larger or smaller than the rate
in which the graph changes (time-scale separation),
(ii) the walker rate is proportional to the degree of
the node that it resides on (coupled dynamics), and
(iii) the degrees of nodes belonging to the same
connected component are identical (structural
constraints). We provide examples that illustrate our
theoretical findings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2012:BRW,
author = "Chul-Ho Lee and Xin Xu and Do Young Eun",
title = "Beyond random walk and {Metropolis--Hastings}
samplers: why you should not backtrack for unbiased
graph sampling",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "319--330",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254795",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Graph sampling via crawling has been actively
considered as a generic and important tool for
collecting uniform node samples so as to consistently
estimate and uncover various characteristics of complex
networks. The so-called simple random walk with
re-weighting (SRW-rw) and Metropolis--Hastings (MH)
algorithm have been popular in the literature for such
unbiased graph sampling. However, an unavoidable
downside of their core random walks --- slow diffusion
over the space --- can cause poor estimation accuracy. In
this paper, we propose non-backtracking random walk
with re-weighting (NBRW-rw) and MH algorithm with
delayed acceptance (MHDA) which are theoretically
guaranteed to achieve, at almost no additional cost,
not only unbiased graph sampling but also higher
efficiency (smaller asymptotic variance of the
resulting unbiased estimators) than the SRW-rw and the
MH algorithm, respectively. In particular, a remarkable
feature of the MHDA is its applicability for any
non-uniform node sampling like the MH algorithm, but
ensuring better sampling efficiency than the MH
algorithm. We also provide simulation results to
confirm our theoretical findings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Song:2012:CEM,
author = "Han Hee Song and Berkant Savas and Tae Won Cho and
Vacha Dave and Zhengdong Lu and Inderjit S. Dhillon and
Yin Zhang and Lili Qiu",
title = "Clustered embedding of massive social networks",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "331--342",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254796",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The explosive growth of social networks has created
numerous exciting research opportunities. A central
concept in the analysis of social networks is a
proximity measure, which captures the closeness or
similarity between nodes in the network. Despite much
research on proximity measures, there is a lack of
techniques to efficiently and accurately compute
proximity measures for large-scale social networks. In
this paper, we embed the original massive social graph
into a much smaller graph, using a novel dimensionality
reduction technique termed Clustered Spectral Graph
Embedding. We show that the embedded graph captures the
essential clustering and spectral structure of the
original graph and allows a wide range of analyses to be
performed on massive social graphs. Applying the
clustered embedding to proximity measurement of social
networks, we develop accurate, scalable, and flexible
solutions to three important social network analysis
tasks: proximity estimation, missing link inference,
and link prediction. We demonstrate the effectiveness
of our solutions to the tasks in the context of large
real-world social network datasets: Flickr,
LiveJournal, and MySpace with up to 2 million nodes and
90 million links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cohen:2012:DLN,
author = "Edith Cohen and Graham Cormode and Nick Duffield",
title = "Don't let the negatives bring you down: sampling from
streams of signed updates",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "343--354",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254798",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Random sampling has been proven time and time again to
be a powerful tool for working with large data. Queries
over the full dataset are replaced by approximate
queries over the smaller (and hence easier to store and
manipulate) sample. The sample constitutes a flexible
summary that supports a wide class of queries. But in
many applications, datasets are modified with time, and
it is desirable to update samples without requiring
access to the full underlying datasets. In this paper,
we introduce and analyze novel techniques for sampling
over dynamic data, modeled as a stream of modifications
to weights associated with each key. While sampling
schemes designed for stream applications can often
readily accommodate positive updates to the dataset,
much less is known for the case of negative updates,
where weights are reduced or items deleted altogether.
We primarily consider the turnstile model of streams,
and extend classic schemes to incorporate negative
updates. Perhaps surprisingly, the modifications to
handle negative updates turn out to be natural and
seamless extensions of the well-known positive
update-only algorithms. We show that they produce
unbiased estimators, and we relate their performance to
the behavior of corresponding algorithms on insert-only
streams with different parameters. A careful analysis
is necessitated in order to account for the fact that
sampling choices for one key now depend on the choices
made for other keys. In practice, our solutions turn
out to be efficient and accurate. Compared to recent
algorithms for $L_p$ sampling which can be applied to
this problem, they are significantly more reliable and
dramatically faster.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
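A deliberately naive Python illustration of the turnstile model referenced in the abstract above: per-key weights receive signed updates (negative deltas reduce weights or delete keys), after which a weighted sample is drawn from the surviving keys. It conveys only the update model, not the paper's bounded-memory streaming estimators; the stream and all names are assumptions.

import random
from collections import defaultdict

def apply_turnstile(stream):
    """Aggregate signed (key, delta) updates; non-positive totals are deleted."""
    weights = defaultdict(float)
    for key, delta in stream:
        weights[key] += delta
        if weights[key] <= 0:
            del weights[key]
    return dict(weights)

def weighted_sample(weights, k, seed=0):
    """Draw k keys without replacement, with probability proportional to weight."""
    rng = random.Random(seed)
    ranked = sorted(weights, key=lambda x: rng.random() ** (1.0 / weights[x]),
                    reverse=True)
    return ranked[:k]

stream = [("a", 3.0), ("b", 1.0), ("a", -2.0), ("c", 4.0), ("b", -1.0)]
totals = apply_turnstile(stream)          # {'a': 1.0, 'c': 4.0}
print(weighted_sample(totals, k=1))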
@Article{Ammar:2012:ERA,
author = "Ammar Ammar and Devavrat Shah",
title = "Efficient rank aggregation using partial data",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "355--366",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254799",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The need to rank items based on user input arises in
many practical applications such as elections, group
decision making and recommendation systems. The primary
challenge in such scenarios is to decide on a global
ranking based on partial preferences provided by users.
The standard approach to address this challenge is to
ask users to provide explicit numerical ratings
(cardinal information) of a subset of the items. The
main appeal of such an approach is the ease of
aggregation. However, the rating scale as well as the
individual ratings are often arbitrary and may not be
consistent from one user to another. A more natural
alternative to numerical ratings requires users to
compare pairs of items (ordinal information). On the
one hand, such comparisons provide an ``absolute''
indicator of the user's preference. On the other hand,
it is often hard to combine or aggregate these
comparisons to obtain a consistent global ranking. In
this work, we provide a tractable framework for
utilizing comparison data as well as first-order
marginal information (see Section 2) for the purpose of
ranking. We treat the available information as partial
samples from an unknown distribution over permutations.
We then reduce ranking problems of interest to
performing inference on this distribution.
Specifically, we consider the problems of (a) finding
an aggregate ranking of $n$ items, (b) learning the
mode of the distribution, and (c) identifying the top
$k$ items. For many of these problems, we provide
efficient algorithms to infer the ranking directly from
the data without the need to estimate the underlying
distribution. In other cases, we use the Principle of
Maximum Entropy to devise a concise parameterization of
a distribution consistent with observations using only
$O(n^2)$ parameters, where $n$ is the number of items
in question. We propose a distributed, iterative
algorithm for estimating the parameters of the
distribution. We establish the correctness of the
algorithm and identify its rate of convergence
explicitly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
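As a hedged reading of the $O(n^2)$ parameterization mentioned in the abstract above, the maximum-entropy distribution over permutations $\sigma$ of $n$ items that is consistent with first-order marginals (the probability that item $i$ sits at position $j$) takes the exponential-family form

$$ P_{\lambda}(\sigma) \;\propto\; \exp\Big( \sum_{i=1}^{n} \sum_{j=1}^{n} \lambda_{ij}\, \mathbf{1}\{\sigma(i)=j\} \Big), $$

which involves only the $n^2$ parameters $\lambda_{ij}$, one per (item, position) pair; the paper's exact formulation may differ.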
@Article{Duffield:2012:FSA,
author = "Nick Duffield",
title = "Fair sampling across network flow measurements",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "367--378",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254800",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sampling is crucial for controlling resource
consumption by internet traffic flow measurements.
Routers use Packet Sampled NetFlow, and completed flow
records are sampled in the measurement infrastructure.
Recent research, motivated by the need of service
providers to accurately measure both small and large
traffic subpopulations, has focused on distributing a
packet sampling budget amongst subpopulations. But long
timescales of hardware development and lower bandwidth
costs motivate post-measurement analysis of complete
flow records at collectors instead. Sampling in
collector databases then manages data volumes, yielding
general purpose summaries that are rapidly queried to
trigger drill-down analysis on a time limited window of
full data. These are sufficiently small to be archived.
This paper addresses the problem of distributing a
sampling budget over subpopulations of flow records.
Estimation accuracy goals are met by fairly sharing the
budget. We establish a correspondence between the type
of accuracy goal, and the flavor of fair sharing used.
A streaming Max-Min Fair Sampling algorithm fairly
shares the sampling budget across subpopulations, with
sampling as a mechanism to deallocate budget. This
provides timely samples and is robust against
uncertainties in configuration and demand. We
illustrate using flow records from an access router of
a large ISP, where rates over interface traffic
subpopulations vary over several orders of magnitude.
We detail an implementation whose computational cost is
no worse than subpopulation-oblivious sampling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
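The max-min fair sharing invoked in the abstract above has a standard water-filling formulation, sketched below in Python as a batch computation (not the paper's streaming algorithm): each round gives every unsatisfied subpopulation an equal share of the remaining budget, and subpopulations whose demand falls below the share receive exactly their demand. The demands and names are assumptions.

def max_min_fair_share(demands, budget):
    """Max-min fair allocation of `budget` across keys with the given demands."""
    alloc = {k: 0.0 for k in demands}
    remaining = dict(demands)
    while remaining and budget > 1e-12:
        share = budget / len(remaining)
        satisfied = [k for k, d in remaining.items() if d <= share]
        if not satisfied:
            for k in remaining:            # everyone is capped by the fair share
                alloc[k] += share
            break
        for k in satisfied:                # small demands are met in full
            alloc[k] += remaining[k]
            budget -= remaining.pop(k)
    return alloc

# Flow-record demands per interface vary over orders of magnitude.
print(max_min_fair_share({"ifA": 5, "ifB": 50, "ifC": 500}, budget=120))
# -> {'ifA': 5.0, 'ifB': 50.0, 'ifC': 65.0}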
@Article{Peng:2012:TBN,
author = "Kunyang Peng and Qunfeng Dong",
title = "{TCAM}-based {NFA} implementation",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "379--380",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254802",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Regular expression matching as the core packet
inspection engine of network systems has long been
striving to be both fast in matching speed (like DFA)
and scalable in storage space (like NFA). Recently,
ternary content addressable memory (TCAM) has been
investigated as a promising way out, by implementing
DFA using TCAM for regular expression matching. In this
paper, we present the first method for implementing NFA
using TCAM. Through proper TCAM encoding, our method
matches each input byte with one single TCAM lookup ---
operating at precisely the same speed as DFA, while
using a number of TCAM entries that can be close to NFA
size. These properties make our method an important
step along a new path --- TCAM-based NFA implementation
--- towards the long-standing goal of fast and scalable
regular expression matching.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anshelevich:2012:SEP,
author = "Elliot Anshelevich and Ameya Hate and Koushik Kar and
Michael Usher",
title = "Stable and efficient pricing for inter-domain traffic
forwarding",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "381--382",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254803",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We address the question of strategic pricing of
inter-domain traffic forwarding services provided by
ISPs, which is also closely coupled with the question
of how ISPs route their traffic towards their
neighboring ISPs. Posing this question as a
non-cooperative game between neighboring ISPs, we study
the properties of this pricing game in terms of the
existence and efficiency of the equilibrium. We observe
that for ``well-provisioned'' ISPs, Nash equilibrium
prices exist and they result in flows that maximize the
overall network utility (generalized end-to-end
throughput). For general ISP topologies, equilibrium
prices may not exist; however, simulations on a large
number of realistic topologies show that best-response
based simple price update solutions converge to stable
and efficient prices and flows for most topologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DiCioccio:2012:MCH,
author = "Lucas DiCioccio and Renata Teixeira and Catherine
Rosenberg",
title = "Measuring and characterizing home networks",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "383--384",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254804",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents the design and evaluation of
HomeNet Profiler, a tool that runs on an end-system in
the home to collect data from home networks. HomeNet
Profiler collects a wide range of measurements
including: the set of devices, the set of services
(with UPnP and Zeroconf), and the characteristics of
the WiFi environment. Since the release of HomeNet
Profiler in April 2011, we have collected data from
over 2,400 distinct homes in 46 different countries.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sommers:2012:CMA,
author = "Joel Sommers and Paul Barford",
title = "Comparing metro-area cellular and {WiFi} performance:
extended abstract",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "385--386",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cellular and 802.11 WiFi offer two compelling
connectivity options for mobile users. The goal of our
work is to better understand performance
characteristics of these technologies in diverse
environments and conditions. To that end, we compare
and contrast cellular and WiFi performance using
crowd-sourced data from speedtest.net. We consider
spatio-temporal performance aspects (e.g., upload and
download throughput and latency) using over 3 million
user-initiated tests in 15 different metro
areas, collected over 15 weeks. In these preliminary
results, we find that WiFi performance generally
exceeds cellular performance, and that observed
characteristics are highly variable across different
locations and times of day. We also observe diverse
performance characteristics resulting from the rollout
of new cell access technologies and service differences
among local providers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nemeth:2012:TSC,
author = "G{\'a}bor N{\'e}meth and G{\'a}bor R{\'e}tv{\'a}ri",
title = "Towards a statistical characterization of the
competitiveness of oblivious routing",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "387--388",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254806",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Oblivious routing asks for a static routing that
serves arbitrary user demands with minimal performance
penalty. Performance is measured in terms of the
competitive ratio, the ratio of the maximum
congestion to the best possible congestion. In this
paper, we take the first steps towards extending this
worst-case characterization to a more revealing
statistical one. We define new performance metrics and
we present numerical evaluations showing that, in
statistical terms, oblivious routing is not as
competitive as the worst-case performance
characterizations would suggest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zarifzadeh:2012:RT,
author = "Sajjad Zarifzadeh and Madhwaraj G. K. and Constantine
Dovrolis",
title = "Range tomography",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "389--390",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254807",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2012:SAM,
author = "Myungjin Lee and Nick Duffield and Ramana Rao
Kompella",
title = "A scalable architecture for maintaining packet latency
measurements",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "391--392",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254808",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Latency has become an important metric for network
monitoring since the emergence of new latency-sensitive
applications (e.g., algorithmic trading and
high-performance computing). In this paper, to provide
latency measurements at both finer (e.g., packet) as
well as flexible (e.g., flow subsets) levels of
granularity, we propose an architecture called MAPLE
that essentially stores packet-level latencies in
routers and allows network operators to query the
latency of arbitrary traffic sub-populations. MAPLE is
built using a scalable data structure called SVBF with
small storage needs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Laner:2012:MRN,
author = "Markus Laner and Philipp Svoboda and Markus Rupp",
title = "Modeling randomness in network traffic",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "393--394",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254809",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A continuous challenge in the field of network traffic
modeling is to map recorded traffic onto parameters of
random processes, in order to enable simulations of the
respective traffic. A key element thereof is a
convenient model which is simple, yet captures the
most relevant statistics. This work aims to find such a
model which, more precisely, enables the generation of
multiple random processes with arbitrary but jointly
characterized distributions, auto-correlation functions
and cross-correlations. Hence, we present the
definition of a novel class of models, the derivation
of a respective closed-form analytical representation
and its application to real network traffic. Our
modeling approach comprises: (i) generating statistically
dependent Gaussian random processes, (ii) introducing
auto-correlation to each process with a linear filter,
and (iii) transforming them sample-wise by real-valued
polynomial functions in order to shape their
distributions. This particular structure allows the
parameter fitting problem to be split into three
independent parts, each of which is solvable by standard
methods. Therefore, it is simple and straightforward to
fit the model to measurement data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
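A minimal numpy sketch, with assumed parameter values, of the three-step structure described in the abstract above: (i) jointly Gaussian innovations, (ii) a linear (here AR(1)) filter adding auto-correlation, and (iii) a sample-wise polynomial transform shaping the marginals. It illustrates the model structure only, not the paper's fitting procedure.

import numpy as np

rng = np.random.default_rng(1)
n = 10_000

# (i) two jointly Gaussian innovation processes with correlation 0.6
cov = np.array([[1.0, 0.6], [0.6, 1.0]])
z = rng.multivariate_normal(mean=[0.0, 0.0], cov=cov, size=n)

# (ii) introduce auto-correlation with a simple AR(1) filter per process
phi = 0.8
x = np.zeros_like(z)
for t in range(1, n):
    x[t] = phi * x[t - 1] + np.sqrt(1.0 - phi**2) * z[t]

# (iii) shape each marginal sample-wise with an (assumed) polynomial
traffic = 2.0 + 0.5 * x + 0.1 * x**2

print(traffic.mean(axis=0), np.corrcoef(traffic.T)[0, 1])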
@Article{Gallo:2012:PER,
author = "Massimo Gallo and Bruno Kauffmann and Luca Muscariello
and Alain Simonian and Christian Tanguy",
title = "Performance evaluation of the random replacement
policy for networks of caches",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "395--396",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254810",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Caching is a key component for Content Distribution
Networks and new Information-Centric Network
architectures. In this paper, we address performance
issues of caching networks running the RND replacement
policy. We first prove that when the popularity
distribution follows a general power-law with decay
exponent $ \alpha > 1 $, the miss probability is
asymptotic to $ O(C^{1 - \alpha }) $ for large cache
size $C$. We further evaluate networks of caches under
the RND policy for homogeneous tree networks and extend the
analysis to tandem cache networks where caches employ
either LRU or RND policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
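Spelling out the asymptotic quoted in the abstract above: for Zipf-like popularity with decay exponent $\alpha > 1$ and cache size $C$, the RND miss probability behaves as

$$ p_{\mathrm{miss}}(C) = O\big(C^{1-\alpha}\big), $$

so, as a purely illustrative reading (not a claim from the paper), $\alpha = 2$ means that doubling the cache size roughly halves the miss probability, whereas for $\alpha$ close to $1$ extra cache space buys little.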
@Article{Mukherjee:2012:SCT,
author = "Koyel Mukherjee and Samir Khuller and Amol Deshpande",
title = "Saving on cooling: the thermal scheduling problem",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "397--398",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254811",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bodas:2012:CCM,
author = "Shreeshankar Bodas and Devavrat Shah and Damon
Wischik",
title = "Congestion control meets medium access: throughput,
delay, and complexity",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "399--400",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254812",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper looks at the problem of designing a medium
access algorithm for wireless networks with the
objective of providing high throughput and low delay
performance to the users, while requiring only a modest
computational effort at the transmitters and receivers.
Additive inter-user interference at the receivers is an
important physical layer characteristic of wireless
networks. Today's Wi-Fi networks are based upon the
abstraction of the physical layer where inter-user
interference is considered as noise, leading to the
'collision' model in which users are required to
co-ordinate their transmissions through Carrier Sensing
Multiple Access (CSMA)-based schemes to avoid
interference. This, in turn, leads to an inherent
performance trade-off [1]: it is impossible to obtain
high throughput and low delay by means of a low
complexity medium access algorithm (unless P=NP). As
the main result, we establish that this trade-off is
primarily due to treating interference as noise in the
current wireless architecture. Concretely, we develop a
simple medium access algorithm that allows for
simultaneous transmissions of users to the same
receiver by performing joint decoding at receivers,
over time. For a receiver to be able to decode multiple
transmissions quickly enough, we develop appropriate
congestion control where each transmitter maintains a
``window'' of undecoded transmitted data that is
adjusted based upon the ``feedback'' from the receiver.
In summary, this provides an efficient, low complexity
``online'' code operating at varying rate, and the
system as a whole experiences only a small amount of
delay (including decoding time) while operating at high
throughput.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tantawi:2012:OCP,
author = "Asser N. Tantawi",
title = "Optimized cloud placement of virtual clusters using
biased importance sampling",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "401--402",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254813",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce an algorithm for the placement of
constrained, networked virtual clusters in the cloud,
that is based on importance sampling (also known as
cross-entropy). Rather than using a straightforward
implementation of such a technique, which proved
inefficient, we considerably enhance the method by
biasing the sampling process to incorporate
communication needs and other constraints of placement
requests to yield an efficient algorithm that is linear
in the size of the cloud. We investigate the quality of
the results of using our algorithm on a simulated
cloud.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shen:2012:PEC,
author = "Kai Shen and Arrvindh Shriraman and Sandhya Dwarkadas
and Xiao Zhang",
title = "Power and energy containers for multicore servers",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "403--404",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254814",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Power capping and energy efficiency are critical
concerns in server systems, particularly when serving
dynamic workloads on resource-sharing multicores. We
present a new operating system facility (power and
energy containers) that accounts for and controls the
power/energy usage of individual fine-grained server
requests. This facility is enabled by novel techniques
for multicore power attribution to concurrent tasks,
measurement/modeling alignment to enhance
predictability, and request power accounting and
control.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2012:CIW,
author = "Kai Wang and Minghong Lin and Florin Ciucu and Adam
Wierman and Chuang Lin",
title = "Characterizing the impact of the workload on the value
of dynamic resizing in data centers",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "405--406",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254815",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Energy consumption imposes a significant cost for data
centers; yet much of that energy is used to maintain
excess service capacity during periods of predictably
low load. As a result, there has recently been interest
in developing designs that allow the service capacity
to be dynamically resized to match the current
workload. However, there is still much debate about the
value of such approaches in real settings. In this
paper, we show that the value of dynamic resizing is
highly dependent on statistics of the workload process.
In particular, both slow time-scale non-stationarities
of the workload (e.g., the peak-to-mean ratio) and the
fast time-scale stochasticity (e.g., the burstiness of
arrivals) play key roles. To illustrate the impact of
these factors, we combine optimization-based modeling
of the slow time-scale with stochastic modeling of the
fast time scale. Within this framework, we provide both
analytic and numerical results characterizing when
dynamic resizing does (and does not) provide
benefits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2012:PLSa,
author = "Yue Tan and Yingdong Lu and Cathy H. Xia",
title = "Provisioning for large scale cloud computing
services",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "407--408",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254816",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Resource provisioning, the task of planning sufficient
amounts of resources to meet service level agreements,
has become an important management task in emerging
cloud computing services. In this paper, we present a
stochastic modeling approach to guide the resource
provisioning task for future service clouds as the
demand grows large. We focus on on-demand services and
consider service availability as the key quality of
service constraint. A specific scenario under
consideration is when resources can be measured in base
instances. We develop an asymptotic provisioning
methodology that utilizes tight performance bounds for
the Erlang loss system to determine the minimum
capacity levels that meet the service availability
requirements. We show that our provisioning solutions
are not only asymptotically exact but also provide
better QoS guarantees at all load conditions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
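A small Python sketch of provisioning against the Erlang loss model mentioned in the abstract above: given an offered load and a target blocking probability, find the smallest capacity that meets it. The recursion for the Erlang B blocking probability is standard; the load and availability target are assumed example values, and this is not the paper's asymptotic bound.

def erlang_b(servers, offered_load):
    """Blocking probability of an Erlang loss (M/M/c/c) system.

    Uses the numerically stable recursion
    B(0) = 1,  B(m) = a*B(m-1) / (m + a*B(m-1)),  a = offered load in Erlangs.
    """
    b = 1.0
    for m in range(1, servers + 1):
        b = offered_load * b / (m + offered_load * b)
    return b

def min_capacity(offered_load, max_blocking):
    """Smallest number of base instances meeting the blocking target."""
    c = 1
    while erlang_b(c, offered_load) > max_blocking:
        c += 1
    return c

# Assumed example: 100 Erlangs of demand, at most 0.1% blocking.
print(min_capacity(offered_load=100.0, max_blocking=0.001))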
@Article{Narayana:2012:DWA,
author = "Srinivas Narayana and Joe Wenjie Jiang and Jennifer
Rexford and Mung Chiang",
title = "Distributed wide-area traffic management for cloud
services",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "409--410",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254817",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of interactive cloud services depends
heavily on which data centers handle client requests,
and which wide-area paths carry traffic. While making
these decisions, cloud service providers also need to
weigh operational considerations like electricity and
bandwidth costs, and balancing server loads across
replicas. We argue that selecting data centers and
network routes independently, as is common in today's
services, can lead to much lower performance or higher
costs than a coordinated decision. However,
fine-grained joint control of two large distributed
systems---e.g., DNS-based replica-mapping and data
center multi-homed routing---can be administratively
challenging. In this paper, we introduce the design of
a system that jointly optimizes replica-mapping and
multi-homed routing, while retaining the functional
separation that exists between them today. We show how
to construct a provably optimal distributed solution
implemented through local computations and message
exchanges between the mapping and routing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dixit:2012:EFG,
author = "Advait Abhay Dixit and Pawan Prakash and Ramana Rao
Kompella and Charlie Hu",
title = "On the efficacy of fine-grained traffic splitting
protocols in data center networks",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "411--412",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254818",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current multipath routing techniques split traffic at
a per-flow level because, according to conventional
wisdom, forwarding packets of a TCP flow along
different paths leads to packet reordering which is
detrimental to TCP. In this paper, we revisit this
``myth'' in the context of cloud data center networks
which have regular topologies such as multi-rooted
trees. We argue that due to the symmetry in the
multiple equal-cost paths in such networks, simply
spraying packets of a given flow among all equal-cost
paths leads to balanced queues across multiple paths,
and consequently little packet reordering. Using a
testbed comprising NetFPGA switches, we show how
cloud applications benefit from better network
utilization in data centers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Frank:2012:CAT,
author = "Benjamin Frank and Ingmar Poese and Georgios
Smaragdakis and Steve Uhlig and Anja Feldmann",
title = "Content-aware traffic engineering",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "413--414",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254819",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent studies show that a large fraction of Internet
traffic is originated by Content Providers (CPs) such
as content distribution networks and hyper-giants. To
cope with the increasing demand for content, CPs deploy
massively distributed server infrastructures. Thus,
content is available in many network locations and can
be downloaded by traversing different paths in a
network. Despite the prominent server location and path
diversity, the decisions on how to map users to servers
by CPs and how to perform traffic engineering by ISPs,
are independent. This leads to a lose-lose situation as
CPs are not aware about the network bottlenecks nor the
location of end-users, and the ISPs struggle to cope
with rapid traffic shifts caused by the dynamic CP
server selection process. In this paper we propose and
evaluate Content-aware Traffic Engineering (CaTE),
which dynamically adapts the traffic demand for content
hosted on CPs by utilizing ISP network information and
end-user location during the server selection process.
This leads to a win-win situation because CPs are able
to enhance their end-user-to-server mapping and ISPs
gain the ability to partially influence the traffic
demands in their networks. Indeed, our results using
traces from a Tier-1 ISP show that a number of network
metrics can be improved when utilizing CaTE.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hu:2012:UPA,
author = "Jian Hu and Hong Jiang and Prakash Manden",
title = "Understanding performance anomalies of {SSDs} and
their impact in enterprise application environment",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "415--416",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254820",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "SSD is known to have the erase-before-write and
out-of-place update properties. When the number of
invalidated pages is more than a given threshold, a
process referred to as garbage collection (GC) is
triggered to erase blocks after valid pages in these
blocks are copied somewhere else. GC degrades both the
performance and lifetime of SSD significantly because
of the read-write-erase operation sequence. In this
paper, we conduct intensive experiments on a 120GB
Intel 320 SATA SSD and a 320GB Fusion IO ioDrive PCI-E
SSD to show and analyze the following important
performance issues and anomalies. The commonly accepted
knowledge that the performance drops sharply as more
data is being written is not always true. This is
because GC efficiency, a more important factor
affecting SSD performance, has not been carefully
considered. It is defined as the percentage of invalid
pages in a GC-erased block. It is possible to avoid the
performance degradation by managing the addressable LBA
range. Estimating the residual lifetime of an SSD is a
very challenging problem because it involves several
interdependent and mutually interacting factors such as
FTL, GC, wear leveling, workload characteristics, etc.
We develop an analytical model to estimate the residual
lifetime of a given SSD. The high random-read
performance is widely accepted as one of the advantages
of SSD. We will show that this is not true if the GC
efficiency is low.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glatz:2012:CIO,
author = "Eduard Glatz and Xenofontas Dimitropoulos",
title = "Classifying {Internet} one-way traffic",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "417--418",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254821",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we analyze a massive data-set that
captures 5.23 petabytes of traffic to shed light on
the composition of one-way traffic towards a large
network, based on a novel one-way traffic classifier. We
find that one-way traffic makes up a very large fraction
of all traffic in terms of flows, that it can be primarily
attributed to malicious causes, and that it has declined
since 2004 because of a relative decrease in scan
traffic. In addition, we show how our classifier is
useful for detecting network outages.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arora:2012:FCE,
author = "Manish Arora and Feng Wang and Bob Rychlik and Dean
Tullsen",
title = "Fast cost efficient designs by building upon the
{Plackett} and {Burman} method",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "419--420",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254822",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "CPU processor design involves a large set of
increasingly complex design decisions, and simulating
all possible designs is typically not feasible.
Sensitivity analysis, a commonly used technique, can be
dependent on the starting point of the design and does
not necessarily account for the cost of each parameter.
This work proposes a method to simultaneously analyze
multiple parameters with a small number of experiments
by leveraging the Plackett and Burman (P\&B) analysis
method. It builds upon the technique in two specific
ways. It allows a parameter to take multiple values and
replaces the unit-less impact factor with
cost-proportional values.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keller:2012:MHN,
author = "Matthias Keller and Jan Beutel and Lothar Thiele",
title = "Multi-hop network tomography: path reconstruction and
per-hop arrival time estimation from partial
information",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "421--422",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254823",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the context of low-power wireless sensor networks,
this paper presents multi-hop network tomography (MNT),
a novel, non-intrusive algorithm for reconstructing the
path, the per-hop arrival order, and the per-hop
arrival time of individual packets at runtime. While
explicitly transmitting this information over the radio
would negatively impact the performance of the system
under investigation, information is instead
reconstructed after packets have been received at the
sink.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papapanagiotou:2012:SVL,
author = "Ioannis Papapanagiotou and Erich M. Nahum and
Vasileios Pappas",
title = "Smartphones vs. laptops: comparing {Web} browsing
behavior and the implications for caching",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "423--424",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we present the differences and
similarities of the web browsing behavior in most
common mobile platforms. We devise a novel Operating
System (OS) fingerprinting methodology to distinguish
different types of wireless devices (smartphones vs.
laptops) as well as operating system instances (iOS,
Android, BlackBerry etc.). We showcase that most of the
multimedia content in smartphone devices is delivered
via Range-Requests, and a large portion of the video
transfers are aborted. We also show that laptop devices
have more intelligent browser caching capabilities. We
investigate the impact of an additional browser cache,
and demonstrate that a 10MB browser cache that is able
to handle partial downloads in smartphones would be
enough to achieve the majority of the savings. Finally,
we showcase that caching policies need to be amended to
attain the maximum possible savings in proxy caches.
Based on those optimizations the emulated proxy cache
provides 10\%--20\% in bandwidth savings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reinecke:2012:MMV,
author = "Philipp Reinecke and Mikl{\'o}s Telek and Katinka
Wolter",
title = "Micro and macro views of discrete-state {Markov}
models and their application to efficient simulation
with phase-type distributions",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "425--426",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254826",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bertran:2012:PFB,
author = "Ramon Bertran and Marc Gonz{\`a}lez and Xavier
Martorell and Nacho Navarro and Eduard Ayguad{\'e}",
title = "{POTRA}: a framework for building power models for
next generation multicore architectures",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "427--428",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254827",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hayden:2012:BTS,
author = "Richard A. Hayden",
title = "Basic theory and some applications of martingales",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "429--430",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial surveys the fundamental results of the
theory of martingales from the perspective of the
performance engineer. We will present the fundamental
results and illustrate their power through simple and
elegant proofs of important and well-known results in
performance analysis. The remainder of the tutorial
will introduce the martingale functional central limit
theorem and semi-martingale decomposition methodology
for the characterization and proof of heavy-traffic
limit results for Markovian queueing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouzaeSilva:2012:AML,
author = "Edmundo {de Souza e Silva} and Daniel Sadoc Menasche",
title = "Applications of machine learning to performance
evaluation",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "431--432",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254829",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aikat:2012:INE,
author = "Jay Aikat and Kevin Jeffay",
title = "Introduction to network experiments using the {GENI}
cyberinfrastructure",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "433--434",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254830",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this tutorial, we will introduce the
SIGMETRICS/Performance community to the vast testbeds,
tools and resources openly available through the GENI
(Global Environment for Network Innovations) project.
We will present details about the distributed computing
resources available on GENI for researchers interested
in simulation as well as measurement-based performance
evaluation experiments. We will demonstrate simple
experiments on GENI, and leave attendees with information on
how to run experiments for research and education using
GENI resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eriksson:2012:PLA,
author = "Brian Eriksson and Paul Barford and Bruce Maggs and
Robert Nowak",
title = "Posit: a lightweight approach for {IP} geolocation",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "2--11",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381058",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Location-specific Internet services are predicated on
the ability to identify the geographic position of IP
hosts accurately. Fundamental to current
state-of-the-art geolocation techniques is reliance on
heavyweight traceroute-like probes that put a
significant traffic load on networks. In this paper, we
introduce a new lightweight approach to IP geolocation
that we call Posit. This methodology requires only a
small number of delay measurements conducted to end
host targets in conjunction with a
computationally-efficient statistical embedding
technique. We demonstrate that Posit performs better
than all existing geolocation tools across a wide
spectrum of measurement infrastructures with varying
geographic densities. Specifically, Posit is shown to
geolocate hosts with median error improvements of over
55\% with respect to all current measurement-based IP
geolocation methodologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coucheney:2012:CSE,
author = "Pierre Coucheney and Patrick Maill{\'e} and Bruno
Tuffin",
title = "Comparison of search engines non-neutral and neutral
behaviors",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "14--17",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381060",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network neutrality has recently attracted a lot of
attention but search neutrality is also becoming a
vivid subject of discussion because a non-neutral
search may prevent some relevant content from being
accessed by users. We propose in this paper to model
two situations of non-neutral search engine behavior,
in which the engine can rank the proposed links according to the
profit a search can generate for it instead of just
relevance: the case when the search engine owns some
content, and the case when it imposes a tax on organic
links, somewhat similarly to what it does for commercial
links. We analyze the particular (and deterministic)
situation of a single keyword, and describe the problem
for the whole potential set of keywords.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hanawal:2012:GTA,
author = "Manjesh K. Hanawal and Eitan Altman and Rajesh
Sundaresan",
title = "Game theoretic analysis of collusions in nonneutral
networks",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "18--21",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381061",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies the impact of exclusive contracts
between a content provider (CP) and an internet service
provider (ISP) in a nonneutral network. We consider a
simple linear demand function for the CPs. We study
when an exclusive contract is beneficial to the
colluding pair and evaluate its impact on the
noncolluding players at equilibrium. For the case of
two CPs and one ISP we show that collusion may not
always be beneficial. We derive an explicit condition
in terms of the advertisement revenues of the CPs that
tells when a collusion is profitable to the colluding
entities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2012:GUW,
author = "Seung Min Yu and Seong-Lyun Kim",
title = "Guaranteeing user welfare in network service:
comparison of two subsidy schemes",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "22--25",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381062",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the emergence of smart devices, mobile data
traffic grows exponentially. A Cisco report predicts
that global mobile data traffic will increase 26-fold
between 2010 and 2015. Therefore, the spectrum shortage
continues and the spectrum price increases, which will
eventually lead to a decrease in user welfare. Another
side effect of the data traffic growth is the
polarization of data traffic among users. To resolve
these problems, we introduce two subsidy schemes (i.e.,
price and quality of service (QoS) subsidy schemes) and
mathematically analyze the effect of each scheme. We
identify that if the regulator has sufficient spectrum
amount for the network service, then the QoS subsidy
scheme will be a good choice for all players in the
network service market. Otherwise, the price subsidy
scheme can be better from user welfare perspective.
Even though our analytic results are derived under some
assumptions for mathematical tractability, they
provide good intuition for spectrum regulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berry:2012:NMC,
author = "R. Berry and M. Honig and T. Nguyen and V. Subramanian
and H. Zhou and R. Vohra",
title = "Newsvendor model of capacity sharing",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "26--29",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381063",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ma:2012:PDK,
author = "Richard T. B. Ma and Dah Ming Chiu and John C. S. Lui
and Vishal Misra and Dan Rubenstein",
title = "Price differentiation in the {Kelly} mechanism",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "30--33",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381064",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Houidi:2012:PTB,
author = "Zied Ben Houidi and Helia Pouyllau",
title = "The price of tussles: bankrupt in cyberspace?",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "34--37",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381065",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lodhi:2012:PSA,
author = "Aemen Lodhi and Amogh Dhamdhere and Constantine
Dovrolis",
title = "Peering strategy adoption by transit providers in the
{Internet}: a game theoretic approach?",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "38--41",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381066",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mastroeni:2012:PIP,
author = "Loretta Mastroeni and Maurizio Naldi",
title = "Pricing of insurance policies against cloud storage
price rises",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "42--45",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381067",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When a company migrates to cloud storage, the way back
is neither fast nor cheap. The company is then locked
up in the storage contract and exposed to upward market
prices, which reduce the company's profit and may even
bring it below zero. We propose a protection means
based on an insurance contract, by which the cloud
purchaser is indemnified when the current storage price
exceeds a pre-defined threshold. By applying the
financial options theory, we provide a formula for the
insurance price (the premium). By using historical data
on market prices for disks, we apply the formula in
realistic scenarios. We show that the premium grows
nearly quadratically with the duration of the coverage
period as long as this is below one year, but grows
more slowly, though faster than linearly, over longer
coverage periods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2012:IVI,
author = "Dongmyung Lee and Jeonghoon Mo and Jinwoo Park",
title = "{ISP} vs. {ISP $+$ CDN}: can {ISPs} in duopoly profit
by introducing {CDN} services?",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "46--48",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381068",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper provides an economic analysis of the
ISP-operated CDN under a duopolistic competition. The
two ISPs are modeled as a platform in a two-sided
market providing Internet access to both content
providers and consumers. By formulating a 4-level
Stackelberg game, we have found that the equilibrium
strategy of an ISP in determining whether to launch CDN
service depends on the marginal cost of cache server
deployment and the two contrary effects: ``Competition
Effect'' and ``Delay Reduction Effect.''",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gulyas:2012:GNF,
author = "Andr{\'a}s Guly{\'a}s and Attila Kor{\"o}si and
D{\'a}vid Szab{\'o} and Gergely Bicz{\'o}k",
title = "On greedy network formation",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "49--52",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381069",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Greedy navigability is a central issue in the theory
of networks. However, the exogenous nature of network
models does not allow for describing how greedy-routable
networks emerge in reality. In turn, network
formation games focus on the very emergence process,
but the applied shortest-path based cost functions
exclude navigational aspects. This paper takes a first
step towards incorporating both emergence (missing in
algorithmic network models) and greedy navigability
(missing in network formation games) into a single
framework, and proposes the Greedy Network Formation
Game. Our first contribution is the game definition,
where we assume a hidden metric space underneath the
network, and, instead of the usual shortest path metric, we
use the length of greedy paths as the measure of
communication cost between players. Our main finding is
that greedy-routable small worlds do not emerge on
constant-dimensional Euclidean grids. This simply means
that the emergence of topologies on which we understood
the principles of greedy forwarding cannot be explained
endogenously. We also present a very brief outlook on
how the situation changes in the hyperbolic space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramakrishnan:2012:EIV,
author = "Lavanya Ramakrishnan and R. Shane Canon and Krishna
Muriki and Iwona Sakrejda and Nicholas J. Wright",
title = "Evaluating Interconnect and Virtualization Performance
for High Performance Computing",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "55--60",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381071",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scientists are increasingly considering cloud
computing platforms to satisfy their computational
needs. Previous work has shown that virtualized cloud
environments can have significant performance impact.
However there is still a limited understanding of the
nature of overheads and the type of applications that
might do well in these environments. In this paper we
detail benchmarking results that characterize the
virtualization overhead and its impact on performance.
We also examine the performance of various interconnect
technologies with a view to understanding the
performance impacts of various choices. Our results
show that virtualization can have a significant impact
upon performance, with at least a 60\% performance
penalty. We also show that less capable interconnect
technologies can have a significant impact upon
performance of typical HPC applications. We also
evaluate the performance of the Amazon Cluster compute
instance and show that it performs approximately
equivalently to a 10G Ethernet cluster at low core
counts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mudalige:2012:PMA,
author = "G. R. Mudalige and M. B. Giles and C. Bertolli and P.
H. J. Kelly",
title = "Predictive modeling and analysis of {OP2} on
distributed memory {GPU} clusters",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "61--67",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381072",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "OP2 is an ``active'' library framework for the
development and solution of unstructured mesh based
applications. It aims to decouple the scientific
specification of an application from its parallel
implementation to achieve code longevity and
near-optimal performance through re-targeting the
backend to different multi-core/many-core hardware.
This paper presents a predictive performance analysis
and benchmarking study of OP2 on heterogeneous cluster
systems. We first present the design of a new OP2
back-end that enables the execution of applications on
distributed memory clusters, and benchmark its
performance during the solution of a 1.5M and 26M
edge-based CFD application written using OP2. Benchmark
systems include a large-scale Cray XE6 system and an
Intel Westmere/InfiniBand cluster. We then apply
performance modeling to predict the application's
performance on an NVIDIA Tesla C2070 based GPU cluster,
enabling us to compare OP2's performance capabilities
on emerging distributed memory heterogeneous systems.
Results illustrate the performance benefits that can be
gained through many-core solutions both on single-node
and heterogeneous configurations in comparison to
traditional homogeneous cluster systems for this class
of applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mateescu:2012:OMT,
author = "Gabriel Mateescu and Gregory H. Bauer and Robert A.
Fiedler",
title = "Optimizing matrix transposes using a {POWER7} cache
model and explicit prefetching",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "68--73",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381073",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of efficiently computing
matrix transposes on the POWER7 architecture. We
develop a matrix transpose algorithm that uses cache
blocking, cache prefetching and data alignment. We
model the POWER7 data cache and memory concurrency and
use the model to predict the memory throughput of the
proposed matrix transpose algorithm. The performance of
our matrix transpose algorithm is up to five times
higher than that of the {\tt dgetmo} routine of the
Engineering and Scientific Subroutine Library and is
2.5 times higher than that of the code generated by
compiler-inserted prefetching. Numerical experiments
indicate a good agreement between the predicted and the
measured memory throughput.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
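
The entry above tunes transposes with cache blocking, explicit
prefetching, and a POWER7-specific cache model. The sketch below
shows only the generic cache-blocking (tiling) idea, in Python with
NumPy assumed available; the prefetching and the POWER7 model are
not reproduced, and the default block size of 64 is an arbitrary
placeholder.

# A minimal sketch of cache-blocked (tiled) out-of-place transposition.
import numpy as np

def transpose_blocked(a, block=64):
    """Transpose a 2-D array tile by tile, so that the source and
    destination tiles being touched stay cache-resident."""
    n, m = a.shape
    out = np.empty((m, n), dtype=a.dtype)
    for i0 in range(0, n, block):
        for j0 in range(0, m, block):
            i1 = min(i0 + block, n)
            j1 = min(j0 + block, m)
            out[j0:j1, i0:i1] = a[i0:i1, j0:j1].T
    return out

a = np.arange(12.0).reshape(3, 4)
assert np.array_equal(transpose_blocked(a, block=2), a.T)
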
@Article{Danalis:2012:BPH,
author = "Anthony Danalis and Piotr Luszczek and Gabriel Marin
and Jeffrey S. Vetter and Jack Dongarra",
title = "{BlackjackBench}: portable hardware characterization",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "74--79",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381074",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "DARPA's AACE project aimed to develop Architecture
Aware Compiler Environments that automatically
characterize the hardware and optimize the
application codes accordingly. We present the
BlackjackBench --- a suite of portable benchmarks that
automate system characterization, plus statistical
analysis techniques for interpreting the results. The
BlackjackBench discovers the effective sizes and speeds
of the hardware environment rather than the often
unattainable peak values. We aim at hardware
characteristics that can be observed by running
standard C codes. We characterize the memory hierarchy,
including cache sharing and NUMA characteristics of the
system, properties of the processing cores affecting
instruction execution speed, and the length of the OS
scheduler time slot. We show how they all could
potentially interfere with each other and how
established classification and statistical analysis
techniques reduce experimental noise and aid automatic
interpretation of results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tineo:2012:TAA,
author = "Adrian Tineo and Sadaf R. Alam and Thomas C.
Schulthess",
title = "Towards autotuning by alternating communication
methods",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "80--85",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381075",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Interconnects in emerging high performance computing
systems feature hardware support for one-sided,
asynchronous communication and global address space
programming models in order to improve parallel
efficiency and productivity by allowing communication
and computation overlap and out-of-order delivery. In
practice though, complex interactions between the
software stack and the communication hardware make it
challenging to obtain optimum performance for a full
application expressed with a one-sided programming
paradigm. Here, we present a proof-of-concept study for
an autotuning framework that instantiates hybrid
kernels based on refactored codes using available
communication libraries or languages on a Cray XE6 and
an SGI Altix UV 1000. We validate our approach by
improving performance for bandwidth- and latency-bound
kernels of interest in quantum physics and astrophysics
by up to 35\% and 80\% respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Iakymchuk:2012:MPT,
author = "Roman Iakymchuk and Paolo Bientinesi",
title = "Modeling performance through memory-stalls",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "86--91",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381076",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We aim at modeling the performance of linear algebra
algorithms without executing either them or parts of
them. The performance of an algorithm can be expressed
in terms of the time spent on CPU execution and on
memory-stalls. The main concern of this paper is to
build analytical models to accurately predict
memory-stalls. The scenario in which data resides in
the L2 cache is considered; with this assumption, only
L1 cache misses occur. We construct an analytical
formula for modeling the L1 cache misses of fundamental
linear algebra operations such as those included in the
Basic Linear Algebra Subprograms (BLAS) library. The
number of cache misses occurring in higher-level
algorithms (like a matrix factorization) is then
predicted by combining the models for the appropriate
BLAS subroutines. As case studies, we consider GER, a
BLAS level-2 operation, and the LU factorization. The
models are validated on both Intel and AMD processors,
attaining remarkably accurate performance
predictions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
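
The entry above expresses run time as CPU execution time plus
memory-stall time. A minimal sketch of that decomposition, assuming
a single constant miss penalty, is given below; the paper's actual
per-operation L1-miss formulas are not reproduced, and every number
in the example is a hypothetical placeholder.

def predicted_cycles(flop_count, flops_per_cycle, l1_misses,
                     miss_penalty_cycles):
    """Predicted cycles = pure CPU cycles + memory-stall cycles."""
    cpu_cycles = flop_count / flops_per_cycle
    stall_cycles = l1_misses * miss_penalty_cycles
    return cpu_cycles + stall_cycles

# E.g. a GER-like rank-1 update on an n x n matrix: about 2*n*n flops
# and, if the matrix streams through L1, roughly one miss per cache
# line of n*n elements.
n, elems_per_line = 1000, 8
print(predicted_cycles(2 * n * n, 4.0, n * n / elems_per_line, 10))
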
@Article{Shan:2012:PEH,
author = "Hongzhang Shan and Nicholas J. Wright and John Shalf
and Katherine Yelick and Marcus Wagner and Nathan
Wichmann",
title = "A preliminary evaluation of the hardware acceleration
of the {Cray Gemini} interconnect for {PGAS} languages
and comparison with {MPI}",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "92--98",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381077",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Gemini interconnect on the Cray XE6 platform
provides for lightweight remote direct memory access
(RDMA) between nodes, which is useful for implementing
partitioned global address space (PGAS) languages like
UPC and Co-Array Fortran. In this paper, we perform a
study of Gemini performance using a set of
communication microbenchmarks and compare the
performance of one-sided communication in PGAS
languages with two-sided MPI. Our results demonstrate
the performance benefits of the PGAS model on Gemini
hardware, showing in what circumstances and by how much
one-sided communication outperforms two-sided in terms
of messaging rate, aggregate bandwidth, and computation
and communication overlap capability. For example, for
8-byte and 2KB messages the one-sided messaging rate is
5 and 10 times greater respectively than the two-sided
one. The study also reveals important information about
how to optimize one-sided Gemini communication.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Deshpande:2012:AGC,
author = "Vivek Deshpande and Xing Wu and Frank Mueller",
title = "Auto-generation of communication benchmark traces",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "99--105",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381078",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Benchmarks are essential for evaluating HPC hardware
and software for petascale machines and beyond. But
benchmark creation is a tedious manual process. As a
result, benchmarks tend to lag behind the development
of complex scientific codes. Our work automates the
creation of communication benchmarks. Given an MPI
application, we utilize ScalaTrace, a lossless and
scalable framework to trace communication operations
and execution time while abstracting away the
computations. A single trace file that reflects the
behavior of all nodes is subsequently expanded to C
source code by a novel code generator. This resulting
benchmark code is compact, portable, human-readable,
and accurately reflects the original application's
communication characteristics and performance.
Experimental results demonstrate that generated source
code of benchmarks preserves both the communication
patterns and the run-time behavior of the original
application. Such automatically generated benchmarks
not only shorten the transition from application
development to benchmark extraction but also facilitate
code obfuscation, which is essential for benchmark
extraction from commercial and restricted
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Su:2012:CPB,
author = "ChunYi Su and Dong Li and Dimitrios S. Nikolopoulos
and Matthew Grove and Kirk Cameron and Bronis R. de
Supinski",
title = "Critical path-based thread placement for {NUMA}
systems",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "106--112",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381079",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multicore multiprocessors use a Non Uniform Memory
Architecture (NUMA) to improve their scalability.
However, NUMA introduces performance penalties due to
remote memory accesses. Without efficiently managing
data layout and thread mapping to cores, scientific
applications may suffer performance loss, even if they
are optimized for NUMA. In this paper, we present
algorithms and a runtime system that optimize the
execution of OpenMP applications on NUMA architectures.
By collecting information from hardware counters, the
runtime system directs thread placement and reduces
performance penalties by minimizing the critical path
of OpenMP parallel regions. The runtime system uses a
scalable algorithm that derives placement decisions
with negligible overhead. We evaluate our algorithms
and the runtime system with four NPB applications
implemented in OpenMP. On average the algorithms
achieve between 8.13\% and 25.68\% performance
improvement, compared to the default Linux thread
placement scheme. The algorithms miss the optimal
thread placement in only 8.9\% of the cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2012:BMD,
author = "DongJin Lee and Michael O'Sullivan and Cameron
Walker",
title = "Benchmarking and modeling disk-based storage tiers for
practical storage design",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "113--118",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381080",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates benchmarking and modeling for
a disk-based storage system in order to design and
build a practical storage tier. As a practical case
study, we focus on the design of an archival storage
tier. The archival tiers play a critical role in data
preservation as almost all current data will eventually
be archived and the demands placed on archival tiers
are growing because of large regularly-scheduled
back-ups. Archival tiers usually consist of tape-based
devices with a large storage capacity, but limited I/O
performance for retrieving data, especially when
multiple retrieval requests are made simultaneously. As
the cost of disk-based devices continues to decrease
while the capacity of individual disks increases,
disk-based systems are becoming a more realistic option
for both enterprise and commodity archival storage
tiers. We utilize archival workloads developed from an
analysis of historical data in order to provide
accurate and robust benchmarks of system performance as
an archive. We then embed our practical measurements in
a measurement-driven optimization approach to design an
archival system. Our approach produces a low cost
design for a commodity disk-based archival storage
system. Using our measurement-driven model, ideal
storage building blocks are identified for a real-world
archival tier design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2012:TEG,
author = "Lingyuan Wang and Miaoqing Huang and Tarek
El-Ghazawi",
title = "Towards efficient {GPU} sharing on multicore
processors",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "119--124",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381081",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalable systems employing a mix of GPUs with CPUs are
becoming increasingly prevalent in high-performance
computing. The presence of such accelerators introduces
significant challenges and complexities to both
language developers and end users. This paper provides
a close study of efficient coordination mechanisms to
handle parallel requests from multiple hosts of control
to a GPU under hybrid programming. Using a set of
microbenchmarks and applications on a GPU cluster, we
show that thread and process-based context hosting have
different tradeoffs. Experimental results on
application benchmarks suggest that both thread-based
context funneling and process-based context switching
natively perform similarly on the latest Fermi GPUs,
while manually guided context funneling is currently
the best way to achieve optimal performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sun:2012:APM,
author = "Xian-He Sun and Dawei Wang",
title = "{APC}: a performance metric of memory systems",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "125--130",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381082",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the infamous ``memory wall'' problem and a
drastic increase in the number of data intensive
applications, memory rather than processor has become
the leading performance bottleneck of modern computing
systems. Evaluating and understanding memory system
performance is increasingly becoming the core of
high-end computing. Conventional memory metrics, such
as miss ratio, average miss latency, average memory
access time, etc., are designed to measure a given
memory performance parameter, and do not reflect the
overall performance of a memory system. On the other
hand, widely used system measurement metrics, such as
IPC and Flops, are designed to measure CPU performance,
and do not directly reflect memory performance. In this
paper, we propose a novel memory metric, Access Per
Cycle (APC), to measure overall memory performance with
consideration of the complexity of modern memory
systems. A unique contribution of APC is its separation
of memory evaluation from CPU evaluation; therefore, it
provides a quantitative measurement of the
``data-intensiveness'' of an application. The concept
of APC is introduced; a constructive investigation
counting the number of data accesses and access cycles
at differing levels of the memory hierarchy is
conducted; finally some important usages of APC are
presented. Simulation results show that APC is
significantly more appropriate than the existing memory
metrics in evaluating modern memory systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
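
The APC metric described above is a ratio of memory accesses to the
cycles in which the memory system is active. The sketch below shows
only that ratio form; exactly which accesses and which cycles are
counted at each level of the hierarchy is defined in the paper, and
the counter values used here are hypothetical placeholders.

def apc(memory_accesses, memory_access_cycles):
    """Accesses completed per cycle of memory-system activity."""
    return memory_accesses / memory_access_cycles

# Hypothetical per-level counter readings from a simulator run.
apc_l1 = apc(memory_accesses=5_000_000, memory_access_cycles=2_000_000)
apc_l2 = apc(memory_accesses=400_000, memory_access_cycles=1_500_000)
print(f"APC(L1) = {apc_l1:.2f}, APC(L2) = {apc_l2:.2f}")
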
@Article{Vitali:2012:LSO,
author = "Roberto Vitali and Alessandro Pellegrini and Francesco
Quaglia",
title = "Load sharing for optimistic parallel simulations on
multi core machines",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "2--11",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425250",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel Discrete Event Simulation (PDES) is based on
the partitioning of the simulation model into distinct
Logical Processes (LPs), each one modeling a portion of
the entire system, which are allowed to execute
simulation events concurrently. This allows exploiting
parallel computing architectures to speedup model
execution, and to make very large models tractable. In
this article we cope with the optimistic approach to
PDES, where LPs are allowed to concurrently process
their events in a speculative fashion, and rollback/
recovery techniques are used to guarantee state
consistency in case of causality violations along the
speculative execution path. Particularly, we present an
innovative load sharing approach targeted at optimizing
resource usage for fruitful simulation work when
running an optimistic PDES environment on top of
multi-processor/multi-core machines. Beyond providing
the load sharing model, we also define a load sharing
oriented architectural scheme, based on a symmetric
multi-threaded organization of the simulation platform.
Finally, we present a real implementation of the load
sharing architecture within the open source ROme
OpTimistic Simulator (ROOT-Sim) package. Experimental
data for an assessment of both viability and
effectiveness of our proposal are presented as well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hahnel:2012:MEC,
author = "Marcus H{\"a}hnel and Bj{\"o}rn D{\"o}bel and Marcus
V{\"o}lp and Hermann H{\"a}rtig",
title = "Measuring energy consumption for short code paths
using {RAPL}",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "13--17",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425252",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measuring the energy consumption of software
components is a major building block for generating
models that allow for energy-aware scheduling,
accounting and budgeting. Current measurement
techniques focus on coarse-grained measurements of
application or system events. However, fine-grain
adjustments, in particular in the operating-system
kernel and in application-level servers, require power
profiles at the level of a single software function.
Until recently, this appeared to be impossible due to
the lack of fine-grain resolution and the high cost of
measurement equipment. In this paper we report on our
experience in using the Running Average Power Limit
(RAPL) energy sensors available in recent Intel CPUs
for measuring energy consumption of short code paths.
We investigate the granularity at which RAPL
measurements can be performed and discuss practical
obstacles that occur when performing these measurements
on complex modern CPUs. Furthermore, we demonstrate how
to use the RAPL infrastructure to characterize the
energy costs for decoding video slices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
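
The entry above measures the energy of short code paths with Intel's
RAPL counters. The sketch below illustrates the same idea through the
Linux powercap sysfs files rather than the raw MSRs used in the paper;
it assumes an Intel CPU with the intel_rapl driver loaded and read
permission on the files, and the measured function is an arbitrary
example. RAPL counters update only about once per millisecond, which
is precisely the granularity limit the paper investigates.

import time

PKG = "/sys/class/powercap/intel-rapl:0"   # package power domain

def read_uj(path):
    with open(path) as f:
        return int(f.read())

def measure_energy(fn):
    """Return (joules, seconds) consumed by one call of fn."""
    wrap = read_uj(PKG + "/max_energy_range_uj")
    e0, t0 = read_uj(PKG + "/energy_uj"), time.time()
    fn()
    e1, t1 = read_uj(PKG + "/energy_uj"), time.time()
    delta = e1 - e0 if e1 >= e0 else e1 + wrap - e0   # counter wrap
    return delta / 1e6, t1 - t0

joules, secs = measure_energy(lambda: sum(range(10_000_000)))
print(f"{joules:.3f} J over {secs:.3f} s")
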
@Article{Mazzucco:2012:EEP,
author = "Michele Mazzucco and Isi Mitrani",
title = "Empirical evaluation of power saving policies for data
centers",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "18--22",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425253",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It has been suggested that the conflicting objectives
of high performance and low power consumption in a
service center can be met by powering a block of
servers on and off, in response to changing demand
conditions. To test that proposition, a dynamic
operating policy is evaluated in a real-life setting,
using the Amazon EC2 cloud platform. The application
running on the cluster is a replica of the English
edition of Wikipedia, with different streams of
requests generated by reading traces from a file and by
means of random numbers with a given mean and squared
coefficient of variation. The system costs achieved by
an 'optimized' version of the policy are compared to
those of a simple heuristic and also to a baseline
policy consisting of keeping all servers powered on all
the time and one where servers are re-allocated
periodically but reserves are not employed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghumre:2012:ENC,
author = "Pooja Ghumre and Junwei Li and Mukil Kesavan and Ada
Gavrilovska and Karsten Schwan",
title = "Evaluating the need for complexity in energy-aware
management for cloud platforms",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "23--27",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425254",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In order to curtail the continuous increase in power
consumption of modern datacenters, researchers are
responding with sophisticated energy-aware workload
management methods. This increases the complexity and
cost of the management operation, and may lead to
increases in failure rates. The goal of this paper is
to illustrate that there exists considerable diversity
in the effectiveness of different, potentially
'smarter' workload management methods depending on the
target metric or the characteristics of the workload
being managed. We conduct experiments on a datacenter
prototype platform, virtualized with the VMware vSphere
software, and using representative cloud applications
--- a distributed key-value store and a map-reduce
computation. We observe that, on our testbed, different
workload placement decisions may be quite effective for
some metrics, but may lead to only marginal impact on
others. In particular, we are considering the impact on
energy-related metrics, such as power or temperature,
as corresponding energy-aware management methods
typically come with greater complexity due to the fact that
they must consider the complex energy consumption
trends of various components in the cloud
infrastructure. We show that for certain applications,
such costs can be avoided, as different management
policies and placement decisions have marginal impact
on the target metric. The objective is to understand
whether for certain classes of applications, and/or
application configurations, it is necessary to incur,
or if it is beneficial to avoid, the use of complex
management methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2012:OSP,
author = "Nicolas Gast and Dan-Cristian Tomozei and Jean-Yves
{Le Boudec}",
title = "Optimal storage policies with wind forecast
uncertainties",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "28--32",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425255",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increase in penetration of wind in the current
energy mix is hindered by its high volatility and poor
predictability. These shortcomings lead to energy loss
and increased deployment of fast ramping generation.
The use of energy storage compensates to some extent
these negative effects; it plays a buffer role between
demand and production. We revisit a model of real
storage proposed by Bejan et al. [1]. We study the
impact on performance of energy conversion efficiency
and of wind prediction quality. Specifically, we
provide theoretical bounds on the trade-off between
energy loss and fast ramping generation, which we show
to be tight for large capacity of the available
storage. Moreover, we develop strategies that
outperform the proposed fixed level policies when
evaluated on real data from the UK grid.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bernstein:2012:SAP,
author = "Andrey Bernstein and Daniel Bienstock and David Hay
and Meric Uzuno{\u{g}}lu and Gil Zussman",
title = "Sensitivity analysis of the power grid vulnerability
to large-scale cascading failures",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "33--37",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425256",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper revisits models of cascading failures in
the transmission system of the power grid. It has been
recently shown that since power flows are governed by
the laws of physics, these models significantly differ
from epidemic/percolation-based models. Yet, while some
numerical results have been recently obtained based on
these models, there is a need to investigate the
sensitivity of the results to various parameters and to
evaluate the models' accuracy. In this paper, through
numerical experiments with real grid data, we study the
effects of geographically correlated outages and the
resulting cascades. We consider a wide range of
parameters, such as the power lines' Factor of Safety
and the sensitivity of the lines to power flow spikes.
Moreover, we compare our numerical results to the
actual events in a recent blackout in the San Diego
area (Sept. 2011), thereby demonstrating that the
model's predictions are consistent with real events.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ardakanian:2012:RDC,
author = "O. Ardakanian and C. Rosenberg and S. Keshav",
title = "{RealTime} distributed congestion control for
electrical vehicle charging",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "38--42",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425257",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The significant load and unpredictable mobility of
electric vehicles (EVs) makes them a challenge for grid
distribution systems. Unlike most current approaches to
control EV charging, which construct optimal charging
schedules by predicting EV state of charge and future
behaviour, we leverage the anticipated widespread
deployment of measurement and control points to propose
an alternative vision. In our approach, drawing from a
comparative analysis of Internet and distribution grid
congestion, control actions taken by a charger every
few milliseconds in response to congestion signals
allow it to rapidly reduce its charging rate to avoid
grid congestion. We sketch three control schemes that
embody this vision and compare their relative merits
and demerits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ardakanian:2012:ISR,
author = "Omid Ardakanian and Catherine Rosenberg and S.
Keshav",
title = "On the impact of storage in residential power
distribution systems",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "43--47",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425258",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is anticipated that energy storage will be
incorporated into the distribution network component of
the future smart grid to allow desirable features such
as distributed generation integration and reduction in
the peak demand. There is, therefore, an urgent need to
understand the impact of storage on distribution system
planning. In this paper, we focus on the effect of
storage on the loading of neighbourhood pole-top
transformers. We apply a probabilistic sizing technique
originally developed for sizing buffers and
communication links in telecommunications networks to
jointly size storage and transformers in the
distribution network. This allows us to compute the
potential gains from transformer upgrade deferral due
to the addition of storage. We validate our results
through numerical simulation using measurements of home
load in a testbed of 20 homes and demonstrate that our
guidelines allow local distribution companies to defer
transformer upgrades without reducing reliability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiu:2012:EGB,
author = "David Chiu and Christopher Stewart and Bart McManus",
title = "Electric grid balancing through low-cost workload
migration",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "48--52",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425259",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Energy production must continuously match demand on
the electric grid. A deficiency can lead to service
disruptions, and a surplus can place tremendous stress
on grid components, potentially causing major
blackouts. To manage this balance, grid operators must
increase or lower power generation, with only a few
minutes to react. The grid balancing problem has also
impeded the pace of integrating bountiful renewable
resources (e.g., wind), whose generation is
intermittent. An emerging plan to mitigate this problem
is demand response, i.e., for grid operators to alter
the electricity usage behavior of the masses through
real-time price signals. But due to prohibitively high
infrastructure costs and societal-scale adoption,
tangible demand response mechanisms have so far been
elusive. We believe that altering the usage patterns of
a multitude of data centers can be a tangible, albeit
initial, step towards affecting demand response.
Growing in both density and size, today's data center
designs are shaped by the increasing awareness of
energy costs and carbon footprint. We posit that
shifting computational workloads (and thus, demand)
across geographic regions to match electricity supply
may help balance the grid. In this paper we will first
present a real grid balancing problem experienced in
the Pacific Northwest. We then propose a symbiotic
relationship between data centers and grid operators by
showing that mutual cost benefits can be accessible.
Finally, we argue for a low cost workload migration
mechanism, and pose overarching challenges in designing
this framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasche:2012:SAP,
author = "Daniel S. Menasch{\'e} and Rosa Maria Meri Le{\"a}o
and Edmundo {de Souza e Silva} and Alberto Avritzer and
Sindhu Suresh and Kishor Trivedi and Raymond A. Marie
and Lucia Happe and Anne Koziolek",
title = "Survivability analysis of power distribution in smart
grids with active and reactive power modeling",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "53--57",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425260",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:2012:UDA,
author = "E. G. {Coffman, Jr.} and Y. Kogan and W. Lai and V.
Ramaswami",
title = "Uptime and downtime analysis for hierarchical
redundant systems in telecommunications",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "59--61",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425262",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider non-degradable hierarchical redundant
systems having multiple working and failure modes with
restoration time depending on failure type. We evaluate
these systems using two measures: generalized uptime
and traditional downtime. We define the Impact Weighted
System Uptime (IWSU) and illustrate its usefulness in
practical terms, viz., an IP router. Next, we provide
an analysis that fits the downtimes by a heavy-tailed
log PH distribution. For these downtime distributions,
we study whether it is more cost effective to reduce
failure rates or to speed up the response to failures.
The first option is a vendor problem, but the second is
a service provider problem. A numerical example is
given to help appreciate the tradeoff.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Avrachenkov:2012:OCC,
author = "K. Avrachenkov and U. Ayesta and J. Doncel and P.
Jacko",
title = "Optimal congestion control of {TCP} flows for
{Internet} routers",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "62--64",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425263",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we address the problem of fast and fair
transmission of flows in a router, which is a
fundamental issue in networks like the Internet. We
model the interaction between a TCP source and a
bottleneck queue with the objective of designing
optimal packet admission controls in the bottleneck
queue. We focus on the relaxed version of the problem
obtained by relaxing the fixed buffer capacity
constraint that must be satisfied at all time epochs.
The relaxation allows us to reduce the multi-flow problem
into a family of single-flow problems, for which we can
analyze both theoretically and numerically the
existence of optimal control policies of special
structure. In particular, we show that for a variety of
parameters, TCP flows can be optimally controlled in
routers by so-called index policies. We have
implemented index policies in Network Simulator-3
(NS-3) and compared their performance with DropTail and
RED buffers. The simulation results show that the index
policy has several desirable properties with respect to
fairness and efficiency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schorgendorfer:2012:TLB,
author = "Angela Sch{\"o}rgendorfer and Peter M. van de Ven and
Bo Zhang",
title = "Temporal load balancing for distributed backup
scheduling",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "65--67",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425264",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rochman:2012:ERM,
author = "Yuval Rochman and Hanoch Levy and Eli Brosh",
title = "Efficient replication in multi-regional peer-supported
{VoD} systems",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "68--70",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425265",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borgs:2012:PQ,
author = "Christian Borgs and Jennifer T. Chayes and Sherwin
Doroudi and Mor Harchol-Balter and Kuang Xu",
title = "Pricing and queueing",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "71--73",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425266",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a pricing in a single observable queue,
where customers all have the same valuation, V, and
the same waiting cost, v. It is known that earning rate
is maximized in such a model when state-dependent
pricing is used and an admissions threshold is deployed
whereby arriving customers may not join the queue if
the total number of customers exceeds this threshold.
This paper is the first to explicitly derive the
optimal threshold. We use our explicit formulation to
obtain asymptotic results on how the threshold grows
with V.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Godtschalk:2012:SBR,
author = "Antonie S. Godtschalk and Florin Ciucu",
title = "Stochastic bounds for randomized load balancing",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "74--76",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425267",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Randomized load balancing is a cost efficient policy
for job scheduling in parallel server queueing systems
whereby, with every incoming job, a central dispatcher
randomly polls some servers and selects the one with
the smallest queue. By exactly deriving the jobs' delay
distribution in such systems, in explicit and closed
form, Mitzenmacher [5] proved the so-called
`power-of-two' result, which states that randomly
polling only two servers yields an exponential
improvement in delay over randomly selecting a single
server. Such a fundamental result, however, was
obtained in an asymptotic regime in the total number of
servers, and does not necessarily provide accurate
estimates for practical finite regimes with small or
moderate number of servers. In this paper we obtain
stochastic lower and upper bounds on the jobs' average
delay in non-asymptotic regimes, by borrowing ideas for
analyzing the particular case of the
Join-the-Shortest-Queue (JSQ) policy. Numerical
illustrations indicate not only that the obtained
bounds are remarkably accurate, but also that the
existing exact but asymptotic results can be largely
misleading in some finite regimes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
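
The note above concerns the power-of-two-choices effect in a
parallel-server queueing system. The sketch below illustrates the
same phenomenon in the simpler balls-into-bins setting (not the
queueing model or the bounds of the note): each ball goes into the
least loaded of d uniformly sampled bins, and the maximum load is
compared for d = 1 and d = 2.

import random

def max_load(n_balls, n_bins, d, seed=0):
    """Maximum bin load after throwing n_balls with d random choices."""
    rng = random.Random(seed)
    load = [0] * n_bins
    for _ in range(n_balls):
        candidates = [rng.randrange(n_bins) for _ in range(d)]
        best = min(candidates, key=lambda b: load[b])
        load[best] += 1
    return max(load)

n = 100_000
print("d = 1 max load:", max_load(n, n, 1))
print("d = 2 max load:", max_load(n, n, 2))   # dramatically smaller
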
@Article{Myers:2012:EQL,
author = "Daniel S. Myers and Mary K. Vernon",
title = "Estimating queue length distributions for queues with
random arrivals",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "77--79",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425268",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work develops an accurate and efficient
two-moment approximation for the queue length
distribution in the M/G/1 queue. Queue length
distributions can provide insight into the impact of
system design changes that go beyond simple averages,
but conventional queueing theory lacks efficient
techniques for estimating the long-run queue length
distribution when service times are not exponential.
The approximate queue lengths depend on only the first
and second moments of the service time rather than the
full service time distribution, resulting in a model
that is applicable to a wide variety of systems.
Validation results show that the new approximation is
highly accurate for light-tailed service time
distributions. Work in progress includes developing
accurate approximations for multi-server queues and
heavy-tailed service distributions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
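
As classical background for the entry above, the sketch below
evaluates the Pollaczek-Khinchine formula for the mean number of
customers in an M/G/1 system, which likewise depends only on the
first two moments of the service time. The paper's approximation of
the full queue-length distribution is not reproduced here.

def mg1_mean_number_in_system(lam, mean_s, scv_s):
    """lam: arrival rate, mean_s: mean service time,
    scv_s: squared coefficient of variation of service time."""
    rho = lam * mean_s
    assert rho < 1.0, "queue must be stable"
    lq = rho * rho * (1.0 + scv_s) / (2.0 * (1.0 - rho))  # mean in queue
    return rho + lq                                        # mean in system

# 80% utilization: deterministic service gives 2.4 customers on
# average, exponential service gives 4.0 (the M/M/1 value).
print(mg1_mean_number_in_system(0.8, 1.0, 0.0))
print(mg1_mean_number_in_system(0.8, 1.0, 1.0))
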
@Article{Cremonesi:2012:MRT,
author = "Paolo Cremonesi and Andrea Sansottera",
title = "Modeling response times in the {Google ROADEF\slash
EURO} challenge",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "80--82",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425269",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we extend the machine reassignment
model proposed by Google for the ROADEF/EURO Challenge.
The aim of the challenge is to develop algorithms for
the efficient solutions of data-center consolidation
problems. The problem stated in the challenge mainly
focuses on dependability requirements and does not take
into account performance requirements (end-to-end
response times). We extend the Google problem
definition by modeling and constraining end-to-end
response times. We provide experimental results to show
the effectiveness of this extension.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2012:PLSb,
author = "Yue Tan and Yingdong Lu and Cathy H. Xia",
title = "Provisioning for large scale loss network systems with
applications in cloud computing",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "83--85",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425270",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pal:2012:CCT,
author = "Ranjan Pal and Pan Hui",
title = "{CyberInsurance} for cybersecurity a topological take
on modulating insurance premiums",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "86--88",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425271",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A recent conjecture in cyber-insurance research states
that for compulsory monopolistic insurance scenarios,
charging fines and rebates on fair premiums will
incentivize network users to invest in self-defense
investments, thereby making cyber-space more robust.
Assuming the validity of the conjecture in this paper,
we adopt a topological perspective in proposing a
mechanism that accounts for (i) the positive
externalities posed (through self-defense investments)
by network users on their peers, and (ii) network
location (based on centrality measures) of users, and
provides an appropriate way to proportionally allocate
fines/rebates on user premiums. We mathematically
justify (via a game-theoretic analysis) that optimal
fine/rebates per user should be allocated in proportion
to the Bonacich or eigenvector centrality value of the
user.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elahi:2012:MFD,
author = "Maryam Elahi and Carey Williamson and Philipp
Woelfel",
title = "Meeting the fairness deadline in speed scaling
systems: is turbocharging enough?",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "89--91",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425272",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work, we explore the notion of 'turbocharging'
in speed scaling systems, and ask whether this is
sufficient to preserve the strong dominance property of
FSP over PS. The answer turns out to be no, but the
analysis yields useful insights into the design of
speed scaling systems that can outperform PS in
response time, energy consumption, or perhaps both.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2012:ASQ,
author = "Eitan Bachmat and Assaf Natanzon",
title = "Analysis of {SITA} queues with many servers and
spacetime geometry",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "92--94",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425273",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bonald:2012:RSS,
author = "Thomas Bonald and Davide Cuda",
title = "{RateOptimal} scheduling schemes for asynchronous
{InputQueued} packet switches",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "95--97",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425274",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of input-queued packet switches
critically depends on the scheduling scheme that
connects the input ports to the output ports. We show
that, when packets are switched asynchronously, simple
scheduling schemes where contention is solved locally
at each input or output can achieve rate optimality,
without any speed-up of the internal transmission
rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2012:OOS,
author = "Minghong Lin and Adam Wierman and Alan Roytman and
Adam Meyerson and Lachlan L. H. Andrew",
title = "Online optimization with switching cost",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "98--100",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425275",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider algorithms for ``smoothed online convex
optimization (SOCO)'' problems. SOCO is a variant of
the class of ``online convex optimization (OCO)''
problems that is strongly related to the class of
``metrical task systems'', each of which has been
studied extensively. Prior literature on these problems
has focused on two performance metrics: regret and
competitive ratio. There exist known algorithms with
sublinear regret and known algorithms with constant
competitive ratios; however, no known algorithms achieve
both. In this paper, we show that this is due to a
fundamental incompatibility between regret and the
competitive ratio --- no algorithm (deterministic or
randomized) can achieve sublinear regret and a constant
competitive ratio, even in the case when the objective
functions are linear.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blaszczyszyn:2012:FVW,
author = "B. Blaszczyszyn and K. Gaurav",
title = "Farout vertices in weighted repeated configuration
model",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "100--103",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425276",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an edge-weighted uniform random graph with
a given degree sequence (Repeated Configuration Model)
which is a useful approximation for many real-world
networks. It has been observed that the vertices which
are separated from the rest of the graph by a distance
exceeding a certain threshold play an important role in
determining some global properties of the graph, like
diameter, flooding time, etc., in spite of being
statistically rare. We give a convergence result for
the distribution of the number of such far-out
vertices. We also make a conjecture about how this
relates to the longest edge of the minimal spanning
tree on the graph under consideration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papadopoulos:2012:RGG,
author = "Fragkiskos Papadopoulos and Constantinos Psomas and
Dmitri Krioukov",
title = "Replaying the geometric growth of complex networks and
application to the {AS Internet}",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "104--106",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425277",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tizghadam:2012:NCV,
author = "Ali Tizghadam and Weiwei Li and Alberto Leon-Garcia",
title = "Network criticality in vehicular networks",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "107--109",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425278",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network criticality (resistance distance) is a
graph-theoretic metric that quantifies network
robustness, and that was originally designed to capture
the effect of environmental changes in core
communication networks. This paper establishes a
relationship between information centrality and network
criticality and provides a justification for using the
average network criticality of a node to quantify the
node's relative importance in a graph. This result
provides a basis for designing robust clustering
algorithms for vehicular networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lui:2013:SPC,
author = "John C. S. Lui and Li Zhang",
title = "A study of pricing for cloud resources",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "3--12",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479944",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a study of pricing cloud resources in this
position paper. Our objective is to explore and
understand the interplay between economics and systems
designs proposed by recent research. We develop a
general model that captures the resource needs of
various applications and usage pricing of cloud
computing. We show that a uniform price does not suffer
any revenue loss compared to first-order price
discrimination. We then consider alternative strategies
that a provider can use to improve revenue, including
resource throttling and performance guarantees, enabled
by recent technical developments. We prove that
throttling achieves the maximum revenue at the expense
of tenant surplus, while providing performance
guarantees with an extra fee is a fairer solution for
both parties. We further extend the model to
incorporate the cost aspect of the problem, and the
possibility of right-sizing capacity. We reveal another
interesting insight that in some cases, instead of
focusing on right-sizing, the provider should work on
the demand and revenue side of the equation, and
pricing is a more feasible and simpler solution. Our
claims are evaluated through extensive trace-driven
simulations with real-world workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2013:SCI,
author = "Zhizhong Zhang and Chuan Wu and David W. L. Cheung",
title = "A survey on cloud interoperability: taxonomies,
standards, and practice",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "13--22",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479945",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cloud computing is a new computing paradigm that
allows users with different computing demands to access
a shared pool of configurable computing resources
(e.g., servers, network, storage, database,
applications and services). Many commercial cloud
providers have emerged in the past 6-7 years, and each
typically provides its own cloud infrastructure, APIs
and application description formats to access the cloud
resources, as well as support for service level
agreements (SLAs). Such vendor lock-in has seriously
limited the flexibility that cloud end users would like
to possess when it comes to deploying applications over
different infrastructures in different geographic
locations, or to migrate a service from one provider's
cloud to another. To enable seamless sharing of
resources from a pool of cloud providers, efforts have
emerged recently to facilitate cloud interoperability,
i.e., the ability for multiple cloud providers to work
together, from both the industry and academia. In this
article, we conduct a comprehensive survey on the
state-of-the-art efforts, with a focus on
interoperability among different IaaS (infrastructure
as a service) cloud platforms. We investigate the
existing studies on taxonomies and standardization of
cloud interoperability, as well as practical cloud
technologies from both the cloud provider's and user's
perspectives to enable interoperation. We pose issues
and challenges to advance the topic area, and hope to
pave the way for forthcoming research.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2013:FPE,
author = "Lei Yang and Jiannong Cao and Yin Yuan and Tao Li and
Andy Han and Alvin Chan",
title = "A framework for partitioning and execution of data
stream applications in mobile cloud computing",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "23--32",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479946",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The contribution of cloud computing and mobile
computing technologies leads to the newly emerging
mobile cloud computing paradigm. Three major approaches
have been proposed for mobile cloud applications: (1)
extending the access to cloud services to mobile
devices; (2) enabling mobile devices to work
collaboratively as cloud resource providers; (3)
augmenting the execution of mobile applications on
portable devices using cloud resources. In this paper,
we focus on the third approach in supporting mobile
data stream applications. More specifically, we study
how to optimize the computation partitioning of a data
stream application between mobile and cloud to achieve
maximum speed/throughput in processing the streaming
data. To the best of our knowledge, it is the first
work to study the partitioning problem for mobile data
stream applications, where the optimization is placed
on achieving high throughput of processing the
streaming data rather than minimizing the makespan of
executions as in other applications. We first propose a
framework to provide runtime support for the dynamic
computation partitioning and execution of the
application. Different from existing works, the
framework not only allows the dynamic partitioning for
a single user but also supports the sharing of
computation instances among multiple users in the cloud
to achieve efficient utilization of the underlying
cloud resources. Meanwhile, the framework has better
scalability because it is designed on the elastic cloud
fabrics. Based on the framework, we design a genetic
algorithm for optimal computation partition. Both
numerical evaluation and real-world experiments have
been performed, and the results show that the
partitioned application can achieve at least two times
better performance in terms of throughput than the
application without partitioning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2013:TOA,
author = "Weina Wang and Kai Zhu and Lei Ying and Jian Tan and
Li Zhang",
title = "A throughput optimal algorithm for map task scheduling
in {MapReduce} with data locality",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "33--42",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479947",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "MapReduce/Hadoop framework has been widely used to
process large-scale datasets on computing clusters.
Scheduling map tasks to improve data locality is
crucial to the performance of MapReduce. Many works
have been devoted to increasing data locality for
better efficiency. However, to the best of our
knowledge, fundamental limits of MapReduce computing
clusters with data locality, including the capacity
region and throughput optimal algorithms, have not been
studied. In this paper, we address these problems from
a stochastic network perspective. Our focus is to
strike the right balance between data-locality and
load-balancing to maximize throughput. We present a new
queueing architecture and propose a map task scheduling
algorithm constituted by the Join the Shortest Queue
policy together with the MaxWeight policy. We identify
an outer bound on the capacity region, and then prove
that the proposed algorithm can stabilize any arrival
rate vector strictly within this outer bound. It shows
that the algorithm is throughput optimal and the outer
bound coincides with the actual capacity region. The
proofs in this paper deal with random processing time
with different parameters and nonpreemptive tasks,
which differentiate our work from many other works, so
the proof technique itself is also a contribution of
this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huang:2013:ESC,
author = "Qun Huang and Patrick P. C. Lee",
title = "An experimental study of cascading performance
interference in a virtualized environment",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "43--52",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479948",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a consolidated virtualized environment, multiple
virtual machines (VMs) are hosted atop a shared
physical substrate. They share the underlying hardware
resources as well as the software virtualization
components. Thus, one VM can generate performance
interference to another co-resident VM. This work
explores the adverse impact of performance interference
from a security perspective. We present a new class of
attacks, namely the cascade attacks, in which an
adversary seeks to generate performance interference
using a malicious VM. One distinct property of the
cascade attacks is that when the malicious VM exhausts
one type of hardware resources, it will bring
``cascading'' interference to another type of hardware
resources. We present four different implementations of
cascade attacks and evaluate their effectiveness atop
the Xen virtualization platform. We show that a victim
VM can see significant performance degradation (e.g.,
throughput drops in network and disk I/Os) due to the
cascade attacks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singh:2013:AMW,
author = "Rahul Singh and Prashant Shenoy and Maitreya Natu and
Vaishali Sadaphal and Harrick Vin",
title = "Analytical modeling for what-if analysis in complex
cloud computing applications",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "53--62",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479949",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern cloud applications are complex distributed
systems with tens or hundreds of interacting software
components. An important management task in cloud
computing platforms is to predict the impact of a
certain workload or reconfiguration change on the
performance of the application. Such predictions
require the design of ``what-if'' models of the
application that take as input hypothetical changes in
the application's workload or environment and estimate
its impact on performance. We present a workload-based
what-if analysis system that uses commonly available
monitoring information in large scale systems to enable
the administrators to ask a variety of workload-based
``what-if'' queries about the system. We use a network
of queues to analytically model the behavior of large
distributed cloud applications. Our system
automatically generates node-level queueing models and
then uses model composition to build system-wide
models. We employ a simple what-if query language and
an intelligent query execution algorithm that employs
on-the-fly model construction and a change propagation
algorithm to efficiently answer queries on large scale
systems. We have built a prototype and have used traces
from two large production cloud applications from a
financial institution as well as real-world synthetic
applications to evaluate its what-if modeling
framework. Our experimental evaluation validates the
accuracy of our node-level resource usage, latency and
workload models and then shows how our system enables
what-if analysis in four different cloud
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2013:DCR,
author = "Jia Liu and Cathy H. Xia and Ness B. Shroff and
Xiaodong Zhang",
title = "On distributed computation rate optimization for
deploying cloud computing programming frameworks",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "63--72",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479950",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the rapidly growing challenges of big data
analytics, the need for efficient and distributed
algorithms to optimize cloud computing performances is
unprecedentedly high. In this paper, we consider how to
optimally deploy a cloud computing programming
framework (e.g., MapReduce and Dryad) over a given
underlying network hardware infrastructure to maximize
the end-to-end computation rate and minimize the
overall computation and communication costs. The main
contributions in this paper are three-fold: (i) we
develop a new network flow model with a generalized
flow-conservation law to enable a systematic design of
distributed algorithms for computation rate utility
maximization problems (CRUM) in cloud computing; (ii)
based on the network flow model, we reveal key
separable properties of the dual functions of Problem
CRUM, which further lead to a distributed algorithm
design; and (iii) we offer important networking
insights and meaningful economic interpretations for
the proposed algorithm and point out their connections
to and distinctions from distributed algorithms design
in traditional data communications networks. This paper
serves as an important first step towards the
development of a theoretical foundation for distributed
computation analytics in cloud computing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2013:MEV,
author = "Giuliano Casale and Mirco Tribastone",
title = "Modelling exogenous variability in cloud deployments",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "73--82",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479951",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Describing exogenous variability in the resources used
by a cloud application leads to stochastic performance
models that are difficult to solve. In this paper, we
describe the blending algorithm, a novel approximation
for queueing network models immersed in a random
environment. Random environments are Markov chain-based
descriptions of time-varying operational conditions
that evolve independently of the system state,
therefore they are natural descriptors for exogenous
variability in a cloud deployment. The algorithm adopts
the principle of solving a separate transient-analysis
subproblem for each state of the random environment.
Each subproblem is then approximated by a system of
ordinary differential equations formulated according to
a fluid limit theorem, making the approach scalable and
computationally inexpensive. A validation study on
several hundred models shows that blending can save up
to two orders of magnitude of computational time
compared to simulation, enabling efficient exploration
of a decision space, which is useful in particular at
design-time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mahmood:2013:TNE,
author = "Shah Mahmood and Yvo Desmedt",
title = "Two new economic models for privacy",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "84--89",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479953",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Private data is leaked more and more in our society.
Wikileaks, Facebook, and identity theft are just three
examples. So, modeling privacy is important.
Cryptographers do not provide methods to address
whether data should remain private or not. The use of
entropy does not reflect the cost associated with the
loss of private data. In this paper we provide two
economic models for privacy. Our first model is a
lattice structured extension of attack graphs. Our
second model is a stochastic almost combinatorial game,
where two or more players can make stochastic moves in
an almost combinatorial setup. In both models, a user
can decide whether to attempt transitions between states,
representing a user's private information, based on
multiple criteria, including the cost of an attempt,
the probability of success, the number of earlier
attempts to obtain this private information and
(possibly) the available budget. In a variant of our
models we use multigraphs. We use this when a
transition between two states could be performed in
different ways. To reduce the increase in complexity,
we introduce a technique converting the multigraph to a
simple directed graph. We discuss the advantages and
disadvantages of this conversion. We also briefly
discuss potential uses of our privacy models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hutton:2013:AEP,
author = "Luke Hutton and Tristan Henderson",
title = "An architecture for ethical and privacy-sensitive
social network experiments",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "90--95",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479954",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Social Network Sites (SNSs) are used for sharing
personal data and delivering personalised services to
hundreds of millions of users, and thus represent an
important sector of the Digital Economy. Measuring and
collecting data from SNSs is crucial for research and
development of new services, but the sensitive and
personal nature of these data means that great care
must be taken by researchers when conducting SNS
studies. This paper presents a work-in-progress
architecture for conducting experiments across multiple
SNSs while acknowledging and preserving participant
privacy. We evaluate the architecture by conducting an
experiment using live SNS data, exploring willingness
to share sensitive data with researchers. We also
outline some outstanding challenges as we finalise the
implementation of the architecture.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2013:LCI,
author = "Minaxi Gupta and Yuqing (Melanie) Wu and Swapnil S.
Joshi and Aparna Tiwari and Ashish Nair and Ezhilan
Ilangovan",
title = "On the linkability of complementary information from
free versions of people databases",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "96--100",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479955",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The privacy of hundreds of millions of people today
could be compromised due to people databases which
claim to store many personal details about individuals,
often without their knowledge. While the paid versions
of these databases may be prohibitively expensive for
data mining on a mass scale, in this paper, we show
that even the limited information provided by the
unpaid versions of these databases can be effectively
exploited for its complementarity and poses a
significant privacy threat since an adversary can mine
this information on a mass scale free of cost and then
use it to his/her advantage, hurting the privacy of
individuals.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tschorsch:2013:HBT,
author = "Florian Tschorsch and Bj{\"o}rn Scheuermann",
title = "How (not) to build a transport layer for anonymity
overlays",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "101--106",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479956",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Internet anonymity systems, like for instance Tor, are
in widespread use today. Technically they are realized
as overlays, i.e., they add another instance of
routing, forwarding, and transport functionality on top
of the Internet protocol stack. This has important (and
often subtle) implications, as overlay and underlay may
interact. Far too often, existing designs neglect this.
Consequently, they suffer from performance issues that
are hard to track down and fix. The existing body of
work in this area often takes a quite narrow view,
tweaking the design in order to improve one specific
aspect. The behavior of the interacting underlay and
overlay transport layers is complex, though, and often
causes unexpected --- and unexplored --- side effects.
Therefore, we show that so far considered combinations
of overlay and underlay protocols cannot deliver good
throughput, latency, and fairness at the same time, and
we establish guidelines for a future, better suited
transport layer design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Prabhakar:2013:DLS,
author = "Balaji Prabhakar",
title = "Designing large-scale nudge engines",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "1--2",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465766",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In many of the challenges faced by the modern world,
from overcrowded transportation systems to
overstretched healthcare systems, large benefits for
society come about from small changes by very many
individuals. We survey the problems and the cost they
impose on society, and describe a framework for
designing ``nudge engines'' --- algorithms, incentives
and technology for influencing human behavior. We
present a model for analyzing their effectiveness and
results from transportation pilots conducted in
Bangalore, at Stanford, and in Singapore, and a
wellness program for the employees of Accenture-USA.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maltz:2013:CCS,
author = "David A. Maltz",
title = "Challenges in cloud scale data centers",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "3--4",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465767",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Data centers are fascinating places, where the massive
scale required to deliver on-line services like web
search and cloud hosting turns minor issues into major
challenges that must be addressed in the design of the
physical infrastructure and the software platform. In
this talk, I'll briefly overview the kinds of
applications that run in mega-data centers and the
workloads they place on the infrastructure. I'll then
describe a number of challenges seen in Microsoft's
data centers, with the goals of posing questions more
than describing solutions and explaining how economic
factors, technology issues, and software design
interact when creating low-latency, low-cost, high
availability services.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2013:PCG,
author = "Xia Zhou and Zengbin Zhang and Gang Wang and Xiaoxiao
Yu and Ben Y. Zhao and Haitao Zheng",
title = "Practical conflict graphs for dynamic spectrum
distribution",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "5--16",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most spectrum distribution proposals today develop
their allocation algorithms that use conflict graphs to
capture interference relationships. The use of conflict
graphs, however, is often questioned by the wireless
community because of two issues. First, building
conflict graphs requires significant overhead and hence
generally does not scale to outdoor networks, and
second, the resulting conflict graphs do not capture
accumulative interference. In this paper, we use
large-scale measurement data as ground truth to
understand just how severe these issues are in
practice, and whether they can be overcome. We build
``practical'' conflict graphs using
measurement-calibrated propagation models, which remove
the need for exhaustive signal measurements by
interpolating signal strengths using calibrated models.
These propagation models are imperfect, and we study
the impact of their errors by tracing the impact on
multiple steps in the process, from calibrating
propagation models to predicting signal strength and
building conflict graphs. At each step, we analyze the
introduction, propagation and final impact of errors,
by comparing each intermediate result to its ground
truth counterpart generated from measurements. Our work
produces several findings. Calibrated propagation
models generate location-dependent prediction errors,
ultimately producing conservative conflict graphs.
While these ``estimated conflict graphs'' lose some
spectrum utilization, their conservative nature
improves reliability by reducing the impact of
accumulative interference. Finally, we propose a graph
augmentation technique that addresses any remaining
accumulative interference, the last missing piece in a
practical spectrum distribution system using
measurement-calibrated conflict graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shafiq:2013:FLC,
author = "Muhammad Zubair Shafiq and Lusheng Ji and Alex X. Liu
and Jeffrey Pang and Shobha Venkataraman and Jia Wang",
title = "A first look at cellular network performance during
crowded events",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "17--28",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465754",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During crowded events, cellular networks face voice
and data traffic volumes that are often orders of
magnitude higher than what they face during routine
days. Despite the use of portable base stations for
temporarily increasing communication capacity and free
Wi-Fi access points for offloading Internet traffic
from cellular base stations, crowded events still
present significant challenges for cellular network
operators looking to reduce dropped call events and
improve Internet speeds. For effective cellular network
design, management, and optimization, it is crucial to
understand how cellular network performance degrades
during crowded events, what causes this degradation,
and how practical mitigation schemes would perform in
real-life crowded events. This paper makes a first step
towards this end by characterizing the operational
performance of a tier-1 cellular network in the United
States during two high-profile crowded events in 2012.
We illustrate how the changes in population
distribution, user behavior, and application workload
during crowded events result in significant voice and
data performance degradation, including more than two
orders of magnitude increase in connection failures.
Our findings suggest two mechanisms that can improve
performance without resorting to costly infrastructure
changes: radio resource allocation tuning and
opportunistic connection sharing. Using trace-driven
simulations, we show that more aggressive release of
radio resources via 1-2 seconds shorter RRC timeouts as
compared to routine days helps to achieve better
tradeoff between wasted radio resources, energy
consumption, and delay during crowded events; and
opportunistic connection sharing can reduce connection
failures by 95\% when employed by a small number of
devices in each cell sector.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ding:2013:CMI,
author = "Ning Ding and Daniel Wagner and Xiaomeng Chen and
Abhinav Pathak and Y. Charlie Hu and Andrew Rice",
title = "Characterizing and modeling the impact of wireless
signal strength on smartphone battery drain",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "29--40",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2466586",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite the tremendous market penetration of
smartphones, their utility has been and will remain
severely limited by their battery life. A major source
of smartphone battery drain is accessing the Internet
over cellular or WiFi connection when running various
apps and services. Despite much anecdotal evidence of
smartphone users experiencing quicker battery drain in
poor signal strength, there has been limited
understanding of how often smartphone users experience
poor signal strength and the quantitative impact of
poor signal strength on the phone battery drain. The
answers to such questions are essential for diagnosing
and improving cellular network services and smartphone
battery life and help to build more accurate online
power models for smartphones, which are building blocks
for energy profiling and optimization of smartphone
apps. In this paper, we conduct the first measurement
and modeling study of the impact of wireless signal
strength on smartphone energy consumption. Our study
makes four contributions. First, through analyzing
traces collected on 3785 smartphones for at least one
month, we show that poor signal strength of both 3G and
WiFi is routinely experienced by smartphone users, both
spatially and temporally. Second, we quantify the extra
energy consumption on data transfer induced by poor
wireless signal strength. Third, we develop a new power
model for WiFi and 3G that incorporates the signal
strength factor and significantly improves the modeling
accuracy over the previous state of the art. Finally,
we perform what-if analysis to quantify the potential
energy savings from opportunistically delaying network
traffic by exploring the dynamics of signal strength
experienced by users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stolyar:2013:LSS,
author = "Alexander L. Stolyar and Yuan Zhong",
title = "A large-scale service system with packing constraints:
minimizing the number of occupied servers",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "41--52",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
abstract = "We consider a large-scale service system model
proposed in [14], which is motivated by the problem of
efficient placement of virtual machines to physical
host machines in a network cloud, so that the total
number of occupied hosts is minimized. Customers of
different types arrive to a system with an infinite
number of servers. A server packing configuration is
the vector $k = \{ k_i \}$, where $k_i$ is the number of
type-$i$ customers that the server ``contains''. Packing
constraints are described by a fixed finite set of
allowed configurations. Upon arrival, each customer is
placed into a server immediately, subject to the
packing constraints; the server can be idle or already
serving other customers. After service completion, each
customer leaves its server and the system. It was shown
in [14] that a simple real-time algorithm, called
Greedy, is asymptotically optimal in the sense of
minimizing $\sum_k X_k^{1 + \alpha}$ in the
stationary regime, as the customer arrival rates grow
to infinity. (Here $\alpha > 0$, and $X_k$ denotes
the number of servers with configuration $k$.) In
particular, when parameter $\alpha$ is small, and in the
asymptotic regime where customer arrival rates grow to
infinity, Greedy solves a problem approximating one of
minimizing $\sum_k X_k$, the number of occupied
hosts. In this paper we introduce the algorithm called
Greedy with sublinear Safety Stocks (GSS), and show
that it asymptotically solves the exact problem of
minimizing $\sum_k X_k$. An important feature of
the algorithm is that sublinear safety stocks of $X_k$
are created automatically --- when and where necessary
--- without having to determine a priori where they are
required. Moreover, we also provide a tight
characterization of the rate of convergence to
optimality under GSS. The GSS algorithm is as simple as
Greedy, and uses no more system state information than
Greedy does.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2013:OEG,
author = "Lian Lu and Jinlong Tu and Chi-Kin Chau and Minghua
Chen and Xiaojun Lin",
title = "Online energy generation scheduling for microgrids
with intermittent energy sources and co-generation",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "53--66",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Microgrids represent an emerging paradigm of future
electric power systems that can utilize both
distributed and centralized generations. Two recent
trends in microgrids are the integration of local
renewable energy sources (such as wind farms) and the
use of co-generation ( i.e., to supply both electricity
and heat). However, these trends also bring
unprecedented challenges to the design of intelligent
control strategies for microgrids. Traditional
generation scheduling paradigms rely on perfect
prediction of future electricity supply and demand.
They are no longer applicable to microgrids with
unpredictable renewable energy supply and with
co-generation (that needs to consider both electricity
and heat demand). In this paper, we study online
algorithms for the microgrid generation scheduling
problem with intermittent renewable energy sources and
co-generation, with the goal of maximizing the
cost-savings with local generation. Based on the
insights from the structure of the offline optimal
solution, we propose a class of competitive online
algorithms, called CHASE (Competitive Heuristic
Algorithm for Scheduling Energy-generation), that track
the offline optimal in an online fashion. Under typical
settings, we show that CHASE achieves the best
competitive ratio among all deterministic online
algorithms, and the ratio is no larger than a small
constant 3. We also extend our algorithms to
intelligently leverage on limited prediction of the
future, such as near-term demand or wind forecast. By
extensive empirical evaluations using real-world
traces, we show that our proposed algorithms can
achieve near offline-optimal performance. In a
representative scenario, CHASE leads to around 20\%
cost reduction with no future look-ahead, and the cost
reduction increases with the future look-ahead
window.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shanmuganathan:2013:DCU,
author = "Ganesha Shanmuganathan and Ajay Gulati and Peter
Varman",
title = "Defragmenting the cloud using demand-based resource
allocation",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "67--80",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465763",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current public cloud offerings sell capacity in the
form of pre-defined virtual machine (VM) configurations
to their tenants. Typically this means that tenants
must purchase individual VM configurations based on the
peak demands of the applications, or be restricted to
only scale-out applications that can share a pool of
VMs. This diminishes the value proposition of moving to
a public cloud as compared to server consolidation in a
private virtualized datacenter, where one gets the
benefits of statistical multiplexing between VMs
belonging to the same or different applications.
Ideally one would like to enable a cloud tenant to buy
capacity in bulk and benefit from statistical
multiplexing among its workloads. This requires the
purchased capacity to be dynamically and transparently
allocated among the tenant's VMs that may be running on
different servers, even across datacenters. In this
paper, we propose two novel algorithms called BPX and
DBS that are able to provide the cloud customer with
the abstraction of buying bulk capacity. These
algorithms dynamically allocate the bulk capacity
purchased by a customer between its VMs based on their
individual demands and user-set importance. Our
algorithms are highly scalable and are designed to work
in a large-scale distributed environment. We
implemented a prototype of BPX as part of VMware's
management software and showed that BPX is able to
closely mimic the behavior of a centralized allocator
in a distributed manner.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Karger:2013:ECM,
author = "David R. Karger and Sewoong Oh and Devavrat Shah",
title = "Efficient crowdsourcing for multi-class labeling",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "81--92",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465761",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Crowdsourcing systems like Amazon's Mechanical Turk
have emerged as an effective large-scale human-powered
platform for performing tasks in domains such as image
classification, data entry, recommendation, and
proofreading. Since workers are low-paid (a few cents
per task) and tasks performed are monotonous, the
answers obtained are noisy and hence unreliable. To
obtain reliable estimates, it is essential to utilize
appropriate inference algorithms (e.g. Majority voting)
coupled with structured redundancy through task
assignment. Our goal is to obtain the best possible
trade-off between reliability and redundancy. In this
paper, we consider a general probabilistic model for
noisy observations for crowd-sourcing systems and pose
the problem of minimizing the total price (i.e.
redundancy) that must be paid to achieve a target
overall reliability. Concretely, we show that it is
possible to obtain an answer to each task correctly
with probability $1 - \epsilon$ as long as the redundancy
per task is $O((K/q) \log (K/\epsilon))$, where each
task can have any of the $K$ distinct answers equally
likely, and $q$ is the crowd-quality parameter that is
defined through a probabilistic model. Further,
effectively this is the best possible
redundancy-accuracy trade-off any system design can
achieve. Such a single-parameter crisp characterization
of the (order-)optimal trade-off between redundancy and
reliability has various useful operational
consequences. Further, we analyze the robustness of our
approach in the presence of adversarial workers and
provide a bound on their influence on the
redundancy-accuracy trade-off. Unlike recent prior work
[GKM11, KOS11, KOS11], our result applies to non-binary
(i.e. {K$>$2}) tasks. In effect, we utilize algorithms
for binary tasks (with inhomogeneous error model unlike
that in [GKM11, KOS11, KOS11]) as key subroutine to
obtain answers for K-ary tasks. Technically, the
algorithm is based on low-rank approximation of
weighted adjacency matrix for a random regular
bipartite graph, weighted according to the answers
provided by the workers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2013:RCD,
author = "Myunghwan Kim and Roshan Sumbaly and Sam Shah",
title = "Root cause detection in a service-oriented
architecture",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "93--104",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465753",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Large-scale websites are predominantly built as a
service-oriented architecture. Here, services are
specialized for a certain task, run on multiple
machines, and communicate with each other to serve a
user's request. An anomalous change in a metric of one
service can propagate to other services during this
communication, resulting in overall degradation of the
request. As any such degradation is revenue impacting,
maintaining correct functionality is of paramount
concern: it is important to find the root cause of any
anomaly as quickly as possible. This is challenging
because there are numerous metrics or sensors for a
given service, and a modern website is usually composed
of hundreds of services running on thousands of
machines in multiple data centers. This paper
introduces MonitorRank, an algorithm that can reduce
the time, domain knowledge, and human effort required
to find the root causes of anomalies in such
service-oriented architectures. In the event of an
anomaly, MonitorRank provides a ranked order list of
possible root causes for monitoring teams to
investigate. MonitorRank uses the historical and
current time-series metrics of each sensor as its
input, along with the call graph generated between
sensors to build an unsupervised model for ranking.
Experiments on real production outage data from
LinkedIn, one of the largest online social networks,
show a 26\% to 51\% improvement in mean average
precision in finding root causes compared to baseline
and current state-of-the-art methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jaggard:2013:DSP,
author = "Aaron D. Jaggard and Swara Kopparty and Vijay
Ramachandran and Rebecca N. Wright",
title = "The design space of probing algorithms for
network-performance measurement",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "105--116",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465765",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a framework for the design and analysis of
probing methods to monitor network performance, an
important technique for collecting measurements in
tasks such as fault detection. We use this framework to
study the interaction among numerous, possibly
conflicting, optimization goals in the design of a
probing algorithm. We present a rigorous definition of
a probing-algorithm design problem that can apply
broadly to network-measurement scenarios. We also
present several metrics relevant to the analysis of
probing algorithms, including probing frequency and
network coverage, communication and computational
overhead, and the amount of algorithm state required.
We show inherent tradeoffs among optimization goals and
give hardness results for achieving some combinations
of optimization goals. We also consider the possibility
of developing approximation algorithms for achieving
some of the goals and describe a randomized approach as
an alternative, evaluating it using our framework. Our
work aids future development of low-overhead probing
techniques and introduces principles from IP-based
networking to theoretically grounded approaches for
concurrent path-selection problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bouman:2013:DMT,
author = "Niek Bouman and Sem Borst and Johan van Leeuwaarden",
title = "Delays and mixing times in random-access networks",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "117--128",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465759",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We explore the achievable delay performance in
wireless random-access networks. While relatively
simple and inherently distributed in nature, suitably
designed backlog-based random-access schemes provide
the striking capability to match the optimal throughput
performance of centralized scheduling mechanisms. The
specific type of activation rules for which throughput
optimality has been established, may however yield
excessive backlogs and delays. Motivated by that issue,
we examine whether the poor delay performance is
inherent to the basic operation of these schemes, or
caused by the specific kind of activation rules. We
derive delay lower bounds for backlog-based activation
rules, which offer fundamental insight in the cause of
the excessive delays. For fixed activation rates we
obtain lower bounds indicating that delays and mixing
times can grow dramatically with the load in certain
topologies as well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cecchi:2013:SUM,
author = "Fabio Cecchi and Peter Jacko",
title = "Scheduling of users with {Markovian} time-varying
transmission rates",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "129--140",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We address the problem of developing a well-performing
and implementable scheduler of users with wireless
connection to the base station. The main feature of
such real-life systems is that the quality conditions
of the user channels are time-varying, which turn into
the time-varying transmission rate due to different
modulation and coding schemes. We assume that this
phenomenon follows a Markovian law and most of the
discussion is dedicated to the case of three quality
conditions of each user, for which we characterize an
optimal index policy and show that threshold policies
(of giving higher priority to users with higher
transmission rate) are not necessarily optimal. For the
general case of arbitrary number of quality conditions
we design a scheduler and propose its two practical
approximations, and illustrate the performance of the
proposed index-based schedulers and existing
alternatives in a variety of simulation scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Simatos:2013:LID,
author = "Florian Simatos and Niek Bouman and Sem Borst",
title = "Lingering issues in distributed scheduling",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "141--152",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465758",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent advances have resulted in queue-based
algorithms for medium access control which operate in a
distributed fashion, and yet achieve the optimal
throughput performance of centralized scheduling
algorithms. However, fundamental performance bounds
reveal that the ``cautious'' activation rules involved
in establishing throughput optimality tend to produce
extremely large delays, typically growing exponentially
in 1/(1-r), with r the load of the system, in contrast
to the usual linear growth. Motivated by that issue, we
explore to what extent more ``aggressive'' schemes can
improve the delay performance. Our main finding is that
aggressive activation rules induce a lingering effect,
where individual nodes retain possession of a shared
resource for excessive lengths of time even while a
majority of other nodes idle. Using central limit
theorem type arguments, we prove that the idleness
induced by the lingering effect may cause the delays to
grow with $ 1/(1-r) $ at a quadratic rate. To the best of
our knowledge, these are the first mathematical results
illuminating the lingering effect and quantifying the
performance impact. In addition, extensive simulation
experiments are conducted to illustrate and validate
the various analytical results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gandhi:2013:EAM,
author = "Anshul Gandhi and Sherwin Doroudi and Mor
Harchol-Balter and Alan Scheller-Wolf",
title = "Exact analysis of the {M/M/k\slash setup} class of
{Markov} chains via recursive renewal reward",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "153--166",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465760",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The M/M/k/setup model, where there is a penalty for
turning servers on, is common in data centers, call
centers and manufacturing systems. Setup costs take the
form of a time delay, and sometimes there is
additionally a power penalty, as in the case of data
centers. While the M/M/1/setup was exactly analyzed in
1964, no exact analysis exists to date for the
M/M/k/setup with {k$>$1}. In this paper we provide the
first exact, closed-form analysis for the M/M/k/setup
and some of its important variants including systems in
which idle servers delay for a period of time before
turning off or can be put to sleep. Our analysis is
made possible by our development of a new technique,
Recursive Renewal Reward (RRR), for solving Markov
chains with a repeating structure. RRR uses ideas from
renewal reward theory and busy period analysis to
obtain closed-form expressions for metrics of interest
such as the transform of time in system and the
transform of power consumed by the system. The
simplicity, intuitiveness, and versatility of RRR make
it useful for analyzing Markov chains far beyond the
M/M/k/setup. In general, RRR should be used to reduce
the analysis of any 2-dimensional Markov chain which is
infinite in at most one dimension and repeating to the
problem of solving a system of polynomial equations. In
the case where all transitions in the repeating portion
of the Markov chain are skip-free and all up/down
arrows are unidirectional, the resulting system of
equations will yield a closed-form solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsitsiklis:2013:QST,
author = "John N. Tsitsiklis and Kuang Xu",
title = "Queueing system topologies with limited flexibility",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "167--178",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465757",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a multi-server model with n flexible servers
and rn queues, connected through a fixed bipartite
graph, where the level of flexibility is captured by
the average degree, d(n), of the queues. Applications
in content replication in data centers, skill-based
routing in call centers, and flexible supply chains are
among our main motivations. We focus on the scaling
regime where the system size n tends to infinity, while
the overall traffic intensity stays fixed. We show that
a large capacity region (robustness) and diminishing
queueing delay (performance) are jointly achievable
even under very limited flexibility ($ d(n) \ll n $). In
particular, when $ d(n) \gg \ln n $, a family of
random-graph-based interconnection topologies is (with
high probability) capable of stabilizing all admissible
arrival rate vectors (under a bounded support
assumption), while simultaneously ensuring a
diminishing queueing delay, of order $ \ln n / d(n) $, as
$ n \to \infty $. Our analysis is centered around a new
class of virtual-queue-based scheduling policies that
rely on dynamically constructed partial matchings on
the connectivity graph.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2013:SML,
author = "Yongkun Li and Patrick P. C. Lee and John C. S. Lui",
title = "Stochastic modeling of large-scale solid-state storage
systems: analysis, design tradeoffs and optimization",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "179--190",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Solid state drives (SSDs) have seen wide deployment in
mobiles, desktops, and data centers due to their high
I/O performance and low energy consumption. As SSDs
write data out-of-place, garbage collection (GC) is
required to erase and reclaim space with invalid data.
However, GC introduces additional writes that hinder the I/O
performance, while SSD blocks can only endure a finite
number of erasures. Thus, there is a
performance-durability tradeoff on the design space of
GC. To characterize the optimal tradeoff, this paper
formulates an analytical model that explores the full
optimal design space of any GC algorithm. We first
present a stochastic Markov chain model that captures
the I/O dynamics of large-scale SSDs, and adapt the
mean-field approach to derive the asymptotic
steady-state performance. We further prove the model
convergence and generalize the model for all types of
workload. Inspired by this model, we propose a
randomized greedy algorithm (RGA) that can operate
along the optimal tradeoff curve with a tunable
parameter. Using trace-driven simulation on DiskSim
with SSD add-ons, we demonstrate how RGA can be
parameterized to realize the performance-durability
tradeoff.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{VanHoudt:2013:MFM,
author = "Benny {Van Houdt}",
title = "A mean field model for a class of garbage collection
algorithms in flash-based solid state drives",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "191--202",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Garbage collection (GC) algorithms play a key role in
reducing the write amplification in flash-based solid
state drives, where the write amplification affects the
lifespan and speed of the drive. This paper introduces
a mean field model to assess the write amplification
and the distribution of the number of valid pages per
block for a class C of GC algorithms. Apart from the
Random GC algorithm, class C includes two novel GC
algorithms: the d-Choices GC algorithm, which selects $d$
blocks uniformly at random and erases the block
containing the least number of valid pages among the
$d$ selected blocks, and the Random++ GC algorithm,
which repeatedly selects another block uniformly at
random until it finds a block with a lower-than-average
number of valid pages. Using simulation experiments we
show that the proposed mean field model is highly
accurate in predicting the write amplification (for
drives with $ N = 50000 $ blocks). We further show that
the d-Choices GC algorithm has a write amplification
close to that of the Greedy GC algorithm even for small
$d$ values, e.g., $ d = 10 $, and offers a more attractive
trade-off between its simplicity and its performance
than the Windowed GC algorithm introduced and analyzed
in earlier studies. The Random++ algorithm is shown to
be less effective as it is even inferior to the FIFO
algorithm when the number of pages $b$ per block is
large (e.g., for $ b \geq 64 $).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jung:2013:RWH,
author = "Myoungsoo Jung and Mahmut Kandemir",
title = "Revisiting widely held {SSD} expectations and
rethinking system-level implications",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "203--216",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Storage applications leveraging Solid State Disk (SSD)
technology are being widely deployed in diverse
computing systems. These applications accelerate system
performance by exploiting several SSD-specific
characteristics. However, modern SSDs have undergone a
dramatic technology and architecture shift in the past
few years, which makes widely held assumptions and
expectations regarding them highly questionable. The
main goal of this paper is to question popular
assumptions and expectations regarding SSDs through an
extensive experimental analysis using 6
state-of-the-art SSDs from different vendors. Our
analysis leads to several conclusions which are either
not reported in prior SSD literature or contradict
current conceptions. For example, we found that SSDs
are not biased toward read-intensive workloads in terms
of performance and reliability. Specifically, random
read performance of SSDs is worse than sequential and
random write performance by 40\% and 39\% on average,
and more importantly, the performance of sequential
reads gets significantly worse over time. Further, we
found that reads can shorten SSD lifetime more than
writes, which is very unfortunate, given the fact that
many existing systems/platforms already employ SSDs as
read caches or in applications that are highly read
intensive. We also performed a comprehensive study to
understand the worst-case performance characteristics
of our SSDs, and investigated the viability of recently
proposed enhancements that are geared towards
alleviating the worst-case performance challenges, such
as TRIM commands and background-tasks. Lastly, we
uncover the overheads of these enhancements and their
limits, and discuss system-level implications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cintra:2013:CIP,
author = "Marcelo Cintra and Niklas Linkewitsch",
title = "Characterizing the impact of process variation on
write endurance enhancing techniques for non-volatile
memory systems",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "217--228",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465755",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Much attention has been given recently to a set of
promising non-volatile memory technologies, such as
PCM, STT-MRAM, and ReRAM. These, however, have limited
endurance relative to DRAM. Potential solutions to this
endurance challenge exist in the form of fine-grain
wear leveling techniques and aggressive error tolerance
approaches. While the existing approaches to wear
leveling and error tolerance are sound and demonstrate
true potential, their studies have been limited in that
(i) they have not considered the interactions between
wear leveling and error tolerance and (ii) they have
assumed a simple write endurance failure model where
all cells fail uniformly. In this paper we perform a
thorough study and characterize such interactions and
the effects of more realistic non-uniform endurance
models under various workloads, both synthetic and
derived from benchmarks. This study shows that, for
instance, variability in the endurance of cells
significantly affects wear leveling and error tolerance
mechanisms and the values of their tuning parameters.
It also shows that these mechanisms interact in subtle
ways, sometimes cancelling and sometimes boosting each
other's impact on overall endurance of the device.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharma:2013:DCS,
author = "Abhigyan Sharma and Arun Venkataramani and Ramesh K.
Sitaraman",
title = "Distributing content simplifies {ISP} traffic
engineering",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "229--242",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465764",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several major Internet service providers today also
offer content distribution services. The emergence of
such ``network-CDNs'' (NCDNs) is driven both by market
forces and by the cost of carrying ever-increasing
volumes of traffic across their backbones. An NCDN has
the flexibility to determine both where content is
placed and how traffic is routed within the network.
However, NCDNs today continue to treat traffic
engineering independently from content placement and
request redirection decisions. In this paper, we
investigate the interplay between content distribution
strategies and traffic engineering and ask whether or
how an NCDN should address these concerns in a joint
manner. Our experimental analysis, based on traces from
a large content distribution network and real ISP
topologies, shows that realistic (i.e., history-based)
joint optimization strategies offer little benefit (and
often significantly underperform) compared to simple
and ``unplanned'' strategies for routing and placement
such as InverseCap and LRU. We also find that the
simpler strategies suffice to achieve network cost and
user-perceived latencies close to those of a
joint-optimal strategy with future knowledge.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Valancius:2013:QBJ,
author = "Vytautas Valancius and Bharath Ravi and Nick Feamster
and Alex C. Snoeren",
title = "Quantifying the benefits of joint content and network
routing",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "243--254",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465762",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Online service providers aim to provide good
performance for an increasingly diverse set of
applications and services. One of the most effective
ways to improve service performance is to replicate the
service closer to the end users. Replication alone,
however, has its limits: while operators can replicate
static content, wide-scale replication of dynamic
content is not always feasible or cost effective. To
improve the latency of such services, many operators
turn to Internet traffic engineering. In this paper, we
study the benefits of performing replica-to-end-user
mappings in conjunction with active Internet traffic
engineering. We present the design of PECAN, a system
that controls both the selection of replicas (``content
routing'') and the routes between the clients and their
associated replicas (``network routing''). We emulate a
replicated service that can perform both content and
network routing by deploying PECAN on a distributed
testbed. In our testbed, we see that jointly performing
content and network routing can reduce round-trip
latency by 4.3\% on average over performing content
routing alone (potentially reducing service response
times by tens of milliseconds or more) and that most of
these gains can be realized with no more than five
alternate routes at each replica.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Simha:2013:HTL,
author = "Dilip Nijagal Simha and Tzi-cker Chiueh and Ganesh
Karuppur Rajagopalan and Pallav Bose",
title = "High-throughput low-latency fine-grained disk
logging",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "255--266",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Synchronously logging updates to persistent storage
first and then asynchronously committing these updates
to their rightful storage locations is a well-known and
heavily used technique to improve the sustained
throughput of write-intensive disk-based data
processing systems, whose latency and throughput
accordingly are largely determined by the latency and
throughput of the underlying logging mechanism. The
conventional wisdom is that logging operations are
relatively straightforward to optimize because the
associated disk access pattern is largely sequential.
However, it turns out that achieving both high
throughput and low latency for fine-grained logging
operations, whose payload size is smaller than a disk
sector, is extremely challenging. This paper describes
the experiences and lessons we have gained from
building a disk logging system that can successfully
deliver over 1.2 million 256-byte logging operations
per second, with the average logging latency below 1
msec.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tudor:2013:UEC,
author = "Bogdan Marius Tudor and Yong Meng Teo",
title = "On understanding the energy consumption of {ARM}-based
multicore servers",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "267--278",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There is growing interest to replace traditional
servers with low-power multicore systems such as ARM
Cortex-A9. However, such systems are typically
provisioned for mobile applications that have lower
memory and I/O requirements than server applications.
Thus, the impact and extent of the imbalance between
application and system resources in exploiting energy
efficient execution of server workloads is unclear.
This paper proposes a trace-driven analytical model for
understanding the energy performance of server
workloads on ARM Cortex-A9 multicore systems. Key to
our approach is modeling the degrees of CPU
core, memory, and I/O resource overlap, and
estimating the number of cores and the clock frequency that
optimize energy performance without compromising
execution time. Since energy usage is the product of
utilized power and execution time, the model first
estimates the execution time of a program. CPU time,
which accounts for both cores and memory response time,
is modeled as an M/G/1 queuing system. Workload
characterization of high performance computing, web
hosting and financial computing applications shows that
bursty memory traffic fits a Pareto distribution, and
non-bursty memory traffic is exponentially distributed.
Our analysis using these server workloads reveals that
not all server workloads might benefit from a higher
number of cores or clock frequencies. Applying our
model, we predict the configurations that increase
energy efficiency by 10\% without turning off cores,
and by up to one third when shutting down unutilized
cores. For memory-bounded programs, we show that the
limited memory bandwidth might increase both execution
time and energy usage, to the point where energy cost
might be higher than on a typical x64 multicore system.
Lastly, we show that increasing memory and I/O
bandwidth can improve both the execution time and the
energy usage of server workloads on ARM Cortex-A9
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sen:2013:RBO,
author = "Rathijit Sen and David A. Wood",
title = "Reuse-based online models for caches",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "279--292",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465756",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop a reuse distance/stack distance based
analytical modeling framework for efficient, online
prediction of cache performance for a range of cache
configurations and replacement policies (LRU, PLRU,
RANDOM, NMRU). Our framework unifies existing cache miss
rate prediction techniques such as Smith's
associativity model, Poisson variants, and hardware
way-counter based schemes. We also show how to adapt
LRU way-counters to work when the number of sets in the
cache changes. As an example application, we
demonstrate how results from our models can be used to
select, based on workload access characteristics,
last-level cache configurations that aim to minimize
energy-delay product.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shahzad:2013:POT,
author = "Muhammad Shahzad and Alex X. Liu",
title = "Probabilistic optimal tree hopping for {RFID}
identification",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "293--304",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465549",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Radio Frequency Identification (RFID) systems are
widely used in various applications such as supply
chain management, inventory control, and object
tracking. Identifying RFID tags in a given tag
population is the most fundamental operation in RFID
systems. While the Tree Walking (TW) protocol has
become the industrial standard for identifying RFID
tags, little is known about the mathematical nature of
this protocol and only some ad-hoc heuristics exist for
optimizing it. In this paper, first, we analytically
model the TW protocol, and then using that model,
propose the Tree Hopping (TH) protocol that optimizes
TW both theoretically and practically. The key novelty
of TH is to formulate tag identification as an
optimization problem and find the optimal solution that
ensures the minimal average number of queries. With
this solid theoretical underpinning, for different tag
population sizes ranging from 100 to 100K tags, TH
significantly outperforms the best prior tag
identification protocols on the metrics of the total
number of queries per tag, the total identification
time per tag, and the average number of responses per
tag by an average of 50\%, 10\%, and 30\%,
respectively, when tag IDs are uniformly distributed in
the ID space, and of 26\%, 37\%, and 26\%,
respectively, when tag IDs are non-uniformly
distributed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peng:2013:MTA,
author = "Qiuyu Peng and Anwar Walid and Steven H. Low",
title = "Multipath {TCP} algorithms: theory and design",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "305--316",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2466585",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multi-path TCP (MP-TCP) has the potential to greatly
improve application performance by using multiple paths
transparently. We propose a fluid model for a large
class of MP-TCP algorithms and identify design criteria
that guarantee the existence, uniqueness, and stability
of system equilibrium. We characterize algorithm
parameters for TCP-friendliness and prove an inevitable
tradeoff between responsiveness and friendliness. We
discuss the implications of these properties on the
behavior of existing algorithms and motivate a new
design that generalizes existing algorithms. We use ns2
simulations to evaluate the proposed algorithm and
illustrate its superior overall performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2013:TAU,
author = "Guang Tan and Zhimeng Yin and Hongbo Jiang",
title = "Trap array: a unified model for scalability evaluation
of geometric routing",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "317--328",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalable routing for large-scale wireless networks
needs to find near shortest paths with low state on
each node, preferably sub-linear with the network size.
Two approaches are considered promising toward this
goal: compact routing and geometric routing
(geo-routing). To date the two lines of research have
been largely independent, perhaps because of the
distinct principles they follow. In particular, it
remains unclear how they compare with each other in the
worst case, despite extensive experimental results
showing the superiority of one or another in particular
cases. We develop a novel Trap Array topology model
that provides a unified framework to uncover the
limiting behavior of ten representative geo-routing
algorithms. We present a series of new theoretical
results, in comparison with the performance of compact
routing as a baseline. In light of their pros and cons,
we further design a Compact Geometric Routing (CGR)
algorithm that attempts to leverage the benefits of
both approaches. Theoretical analysis and simulations
show the advantages of the topology model and the
algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Andrew:2013:TTM,
author = "Lachlan Andrew and Siddharth Barman and Katrina Ligett
and Minghong Lin and Adam Meyerson and Alan Roytman and
Adam Wierman",
title = "A tale of two metrics: simultaneous bounds on
competitiveness and regret",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "329--330",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465533",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2013:AGA,
author = "Zhibin Yu and Lieven Eeckhout and Nilanjan Goswami and
Tao Li and Lizy John and Hai Jin and Chengzhong Xu",
title = "Accelerating {GPGPU} architecture simulation",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "331--332",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465540",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/pvm.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, graphics processing units (GPUs) have opened
up new opportunities for speeding up general-purpose
parallel applications due to their massive
computational power and up to hundreds of thousands of
threads enabled by programming models such as CUDA.
However, due to the serial nature of existing
micro-architecture simulators, these massively parallel
architectures and workloads need to be simulated
sequentially. As a result, simulating GPGPU
architectures with typical benchmarks and input data
sets is extremely time-consuming. This paper addresses
the GPGPU architecture simulation challenge by
generating miniature, yet representative GPGPU kernels.
We first summarize the static characteristics of an
existing GPGPU kernel in a profile, and analyze its
dynamic behavior using the novel concept of the
divergence flow statistics graph (DFSG). We
subsequently use a GPGPU kernel synthesizing framework
to generate a miniature proxy of the original kernel,
which can reduce simulation time significantly. The key
idea is to reduce the number of simulated instructions
by decreasing per-thread iteration counts of loops. Our
experimental results show that our approach can
accelerate GPGPU architecture simulation by a factor of
88X on average and up to 589X with an average IPC
relative error of 5.6\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2013:AAC,
author = "Di Wang and Chuangang Ren and Sriram Govindan and
Anand Sivasubramaniam and Bhuvan Urgaonkar and Aman
Kansal and Kushagra Vaid",
title = "{ACE}: abstracting, characterizing and exploiting
peaks and valleys in datacenter power consumption",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "333--334",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Peak power management of datacenters has tremendous
cost implications. While numerous mechanisms have been
proposed to cap power consumption, real datacenter
power consumption data is scarce. To address this gap,
we collect power demands at multiple spatial and
fine-grained temporal resolutions from the load of
geo-distributed datacenters of Microsoft over 6 months.
We conduct aggregate analysis of this data, to study
its statistical properties. With workload
characterization a key ingredient for systems design
and evaluation, we note the importance of better
abstractions for capturing power demands, in the form
of peaks and valleys. We identify and characterize
attributes for peaks and valleys, and important
correlations across these attributes that can influence
the choice and effectiveness of different power capping
techniques. With the wide scope of exploitability of
such characteristics for power provisioning and
optimizations, we illustrate their benefits with two
specific case studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Potharaju:2013:EAI,
author = "Rahul Potharaju and Navendu Jain",
title = "An empirical analysis of intra- and inter-datacenter
network failures for geo-distributed services",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "335--336",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465749",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As cloud services continue to grow, a key requirement
is delivering an 'always-on' experience to end users.
Of the several factors affecting service availability,
network failures in the hosting datacenters have
received little attention. This paper presents a
preliminary analysis of intra-datacenter and
inter-datacenter network failures from a service
perspective. We describe an empirical study analyzing
and correlating network failure events over a year
across multiple datacenters in a service provider. Our
broader goal is to outline steps leveraging existing
network mechanisms to improve end-to-end service
availability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mazauric:2013:CAC,
author = "Dorian Mazauric and Saleh Soltan and Gil Zussman",
title = "Computational analysis of cascading failures in power
networks",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "337--338",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465752",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper focuses on cascading line failures in the
transmission system of the power grid. Such a cascade
may have a devastating effect not only on the power
grid but also on the interconnected communication
networks. Recent large-scale power outages demonstrated
the limitations of epidemic- and percolation-based
tools in modeling the cascade evolution. Hence, based
on a linearized power flow model (that substantially
differs from the classical packet flow models), we
obtain results regarding the various properties of a
cascade. Specifically, we consider performance metrics
such as the distance between failures, the length of
the cascade, and the fraction of demand (load)
satisfied after the cascade. We show, for example, that
due to the unique properties of the model: (i) the
distance between subsequent failures can be arbitrarily
large and the cascade may be arbitrarily long, (ii) a
large set of initial line failures may have a smaller
effect than a failure of one of the lines in the set,
and (iii) minor changes to the network parameters may
have a significant impact. Moreover, we show that
finding the set of lines whose removal has the most
significant impact (under various metrics) is NP-hard.
Furthermore, we develop a fast algorithm to recompute the
flows at each step of the cascade. The results can
provide insight into the design of smart grid
measurement and control algorithms that can mitigate a
cascade.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nelson:2013:DCA,
author = "John C. Nelson and Jonathan Connell and Canturk Isci
and Jonathan Lenchner",
title = "Data center asset tracking using a mobile robot",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "339--340",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2466584",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Management and monitoring of data centers is a growing
field of interest, with much current research, and the
emergence of a variety of commercial products aiming to
improve performance, resource utilization and energy
efficiency of the computing infrastructure. Despite the
large body of work on optimizing data center
operations, few studies actually focus on discovering
and tracking the physical layout of assets in these
centers. Such asset tracking is a prerequisite to
faithfully performing administration and any form of
optimization that relies on physical layout
characteristics. In this work, we describe an approach
to completely automated asset tracking in data centers,
employing a vision-based mobile robot in conjunction
with an ability to manipulate the indicator LEDs in
blade centers and storage arrays. Unlike previous
large-scale asset-tracking methods, our approach does
not require the tagging of assets (e.g., with RFID tags
or barcodes), thus saving considerable expense and
human labor. The approach is validated through a series
of experiments in a production industrial data
center.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2013:DCD,
author = "Zhenhua Liu and Adam Wierman and Yuan Chen and
Benjamin Razon and Niangjun Chen",
title = "Data center demand response: avoiding the coincident
peak via workload shifting and local generation",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "341--342",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465740",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Demand response is a crucial aspect of the future
smart grid. It has the potential to provide significant
peak demand reduction and to ease the incorporation of
renewable energy into the grid. Data centers'
participation in demand response is becoming
increasingly important given the high and increasing
energy consumption and the flexibility in demand
management in data centers compared to conventional
industrial facilities. In this extended abstract we
briefly describe recent work in our full paper on two
demand response schemes to reduce a data center's peak
loads and energy expenditure: workload shifting and the
use of local power generation. In our full paper, we
conduct a detailed characterization study of coincident
peak data over two decades from Fort Collins Utilities,
Colorado and then develop two algorithms for data
centers by combining workload scheduling and local
power generation to avoid the coincident peak and
reduce the energy expenditure. The first algorithm
optimizes the expected cost and the second one provides
a good worst-case guarantee for any coincident peak
pattern. We evaluate these algorithms via numerical
simulations based on real world traces from production
systems. The results show that using workload shifting
in combination with local generation can provide
significant cost savings (up to 40\% in the Fort
Collins Utilities' case) compared to either alone.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Saez:2013:DFP,
author = "Juan Carlos S{\'a}ez and Fernando Castro and Daniel
Chaver and Manuel Prieto",
title = "Delivering fairness and priority enforcement on
asymmetric multicore systems via {OS} scheduling",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "343--344",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465532",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/multithreading.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Symmetric-ISA (instruction set architecture)
asymmetric-performance multicore processors (AMPs) were
shown to deliver higher performance per watt and area
than symmetric CMPs for applications with diverse
architectural requirements. So, it is likely that
future multicore processors will combine big
power-hungry fast cores and small low-power slow ones.
In this paper, we propose a novel thread scheduling
algorithm that aims to improve the throughput-fairness
trade-off on AMP systems. Our experimental evaluation
on real hardware and using scheduler implementations on
a general-purpose operating system, reveals that our
proposal delivers a better throughput-fairness
trade-off than previous schedulers for a wide variety
of multi-application workloads including
single-threaded and multithreaded applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arvidsson:2013:DUD,
author = "Ake Arvidsson and Ying Zhang",
title = "Detecting user dissatisfaction and understanding the
underlying reasons",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "345--346",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Quantifying quality of experience for network
applications is challenging as it is a subjective
metric with multiple dimensions such as user
expectation, satisfaction, and overall experience.
Today, despite various techniques to support
differentiated Quality of Service (QoS), the operators
still lack automated methods to translate QoS to
QoE, especially for general web applications. In this
work, we take the approach of identifying
unsatisfactory performance by searching for
user-initiated early terminations of web transactions from
passive monitoring. However, user early abortions can
be caused by other factors such as loss of interests.
Therefore, naively using them to represent user
dissatisfaction will result in large false positives.
In this paper, we propose a systematic method for
inferring user dissatisfaction from the set of early
abortion behaviors observed in the
traffic traces. We conduct a comprehensive analysis of
the user acceptance of throughput and response time,
and compare them with the traditional MOS metric. Then
we present the characteristics of early cancellation
from dimensions like the types of URLs and objects. We
evaluate our approach on four data sets collected in
both a wireline network and a wireless cellular
network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kong:2013:DMD,
author = "Deguang Kong and Guanhua Yan",
title = "Discriminant malware distance learning on structural
information for automated malware classification",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "347--348",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465531",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work, we explore techniques that can
automatically classify malware variants into their
corresponding families. Our framework extracts
structural information from malware programs as
attributed function call graphs, further learns
discriminant malware distance metrics, finally adopts
an ensemble of classifiers for automated malware
classification. Experimental results show that our
method is able to achieve high classification
accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peserico:2013:EP,
author = "Enoch Peserico",
title = "Elastic paging",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "349--350",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2479781",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a generalization of the classic paging
problem where memory capacity can vary over time --- a
property of many modern computing realities, from cloud
computing to multi-core and energy-optimized
processors. We show that good performance in the
``classic'' case provides no performance guarantees
when memory capacity fluctuates: roughly speaking,
moving from static to dynamic capacity can mean the
difference between optimality within a factor 2 in
space, time and energy, and suboptimality by an
arbitrarily large factor. Surprisingly, several classic
paging algorithms still perform remarkably well,
maintaining that factor 2 optimality even if faced with
adversarial capacity fluctuations --- without taking
those fluctuations into explicit account!",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gan:2013:ECR,
author = "Lingwen Gan and Na Li and Steven Low and Ufuk Topcu",
title = "Exact convex relaxation for optimal power flow in
distribution networks",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "351--352",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465535",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The optimal power flow (OPF) problem seeks to control
the power generation/consumption to minimize the
generation cost, and is becoming important for
distribution networks. OPF is nonconvex and a
second-order cone programming (SOCP) relaxation has
been proposed to solve it. We prove that after a
``small'' modification to OPF, the SOCP relaxation is
exact under a ``mild'' condition. Empirical studies
demonstrate that the modification to OPF is ``small''
and that the ``mild'' condition holds for all test
networks, including the IEEE 13-bus test network and
practical networks with high penetration of distributed
generation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kwak:2013:EPR,
author = "Jaewook Kwak and Chul-Ho Lee and Do Young Eun",
title = "Exploiting the past to reduce delay in {CSMA}
scheduling: a high-order {Markov} chain approach",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "353--354",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently several CSMA algorithms based on the Glauber
dynamics model have been proposed for multihop wireless
scheduling, as viable solutions to achieve the
throughput optimality, yet are simple to implement.
However, their delay performances still remain
unsatisfactory, mainly due to the nature of the
underlying Markov chains that imposes a fundamental
constraint on how the link state can evolve over time.
In this paper, we propose a new approach toward better
queueing and delay performance, based on our
observation that the algorithm need not be Markovian,
as long as it can be implemented in a distributed
manner, achieving the same throughput optimality and
better delay performance. Our approach hinges upon
utilizing past state information observed by each local link
and then constructing a high-order Markov chain for the
evolution of the feasible link schedules. Our proposed
algorithm, named delayed CSMA, adds virtually no
additional overhead onto the existing CSMA-based
algorithms, achieves the throughput optimality under
the usual choice of link weight as a function of queue
length, and also provides much better delay performance
by effectively resolving the temporal link starvation
problem. From our extensive simulations we observe that
the delay under our algorithm can be often reduced by a
factor of 20 over a wide range of scenarios, compared
to the standard Glauber-dynamics-based CSMA
algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Paredes-Oliva:2013:FFR,
author = "Ignasi Paredes-Oliva and Pere Barlet-Ros and
Xenofontas Dimitropoulos",
title = "{FaRNet}: fast recognition of high multi-dimensional
network traffic patterns",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "355--356",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465743",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Extracting knowledge from big network traffic data is
a matter of foremost importance for multiple purposes
ranging from trend analysis or network troubleshooting
to capacity planning or traffic classification. An
extremely useful approach to profile traffic is to
extract and display to a network administrator the
multi-dimensional hierarchical heavy hitters (HHHs) of
a dataset. However, existing schemes for computing HHHs
have several limitations: (1) they require significant
computational overhead; (2) they do not scale to high
dimensional data; and (3) they are not easily
extensible. In this paper, we introduce a fundamentally
new approach for extracting HHHs based on generalized
frequent item-set mining (FIM), which allows us to process
traffic data much more efficiently and scales to much
higher dimensional data than present schemes. Based on
generalized FIM, we build and evaluate a traffic
profiling system we call FaRNet. Our comparison with
AutoFocus, which is the most related tool of similar
nature, shows that FaRNet is up to three orders of
magnitude faster.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghiassi-Farrokhfal:2013:FSP,
author = "Yashar Ghiassi-Farrokhfal and Srinivasan Keshav and
Catherine Rosenberg and Florin Ciucu",
title = "Firming solar power",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "357--358",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465744",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The high variability of solar power due to intrinsic
diurnal variability, as well as additional stochastic
variations due to cloud cover, have made it difficult
for solar farms to participate in electricity markets
that require pre-committed constant power generation.
We study the use of battery storage to 'firm' solar
power, that is, to remove variability so that such a
pre-commitment can be made. Due to the high cost of
storage, it is necessary to size the battery
parsimoniously, choosing the minimum size to meet a
certain reliability guarantee. Inspired by recent work
that identifies an isomorphism between batteries and
network buffers, we introduce a new model for solar
power generation that models it as a stochastic traffic
source. This permits us to use techniques from the
stochastic network calculus to both size storage and to
maximize the revenue that a solar farm owner can make
from the day-ahead power market. Using a 10-year trace of
recorded solar irradiance, we show that our approach
attains 93\% of the maximum revenue on a summer day
that would have been achieved in the daily market had the
entire solar irradiance trace been known ahead of
time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2013:GNL,
author = "Yi Wang and Dongzhe Tai and Ting Zhang and Jianyuan Lu
and Boyang Xu and Huichen Dai and Bin Liu",
title = "Greedy name lookup for named data networking",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "359--360",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465741",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/hash.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Different from the IP-based routers, Named Data
Networking routers forward packets by content names,
which consist of characters and have variable and
unbounded length. This complex name
structure, together with the huge name routing table,
makes wire-speed name lookup an extremely challenging
task. A greedy name lookup mechanism is proposed to speed
up name lookup by dynamically adjusting the search path
against the changes of the prefix table. Meanwhile, we
design a string-oriented perfect hash table that
reduces memory consumption by storing the signature of
the key in each entry instead of the key itself.
Extensive experimental results on a commodity PC server
with 3 million name prefix entries demonstrate that
greedy name lookup mechanism achieves 57.14 million
searches per second using only 72.95 MB memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dong:2013:HDE,
author = "Mian Dong and Tian Lan and Lin Zhong",
title = "How does energy accounting matter for energy
management?",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "361--362",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465742",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moharir:2013:OLB,
author = "Sharayu Moharir and Sujay Sanghavi and Sanjay
Shakkottai",
title = "Online load balancing under graph constraints",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "363--364",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465751",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In several data center settings, each arriving job may
only be served by one of a subset of servers. Such a
graph constraint can arise due to several reasons. One
is locality of the data needed by a job; for example,
in content farms (e.g. in Netflix or YouTube) a video
request can only be served by a machine that possesses
a copy. Motivated by this, we consider a setting where
each job, on arrival, reveals a deadline and a subset
of servers that can serve it. The job needs to be
immediately allocated to one of these servers, and
cannot be moved thereafter. Our objective is to
maximize the fraction of jobs that are served before
their deadlines. For this online load balancing
problem, we prove an upper bound of 1-1/e on the
competitive ratio of non-preemptive online algorithms
for systems with a large number of servers. We propose
an algorithm --- INSERT RANKING --- which achieves this
upper bound. The algorithm makes decisions in a
correlated random way and it is inspired by the work of
Karp, Vazirani and Vazirani on online matching for
bipartite graphs. We also show that two more natural
algorithms, based on independent randomness, are
strictly suboptimal, with a competitive ratio of 1/2.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kambadur:2013:PSP,
author = "Melanie Kambadur and Kui Tang and Joshua Lopez and
Martha A. Kim",
title = "Parallel scaling properties from a basic block view",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "365--366",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465748",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/multithreading.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As software scalability lags behind hardware
parallelism, understanding scaling behavior is more
important than ever. This paper demonstrates how to use
Parallel Block Vector (PBV) profiles to measure the
scaling properties of multithreaded programs from a new
perspective: the basic block's view. Through this lens,
we guide users through quick and simple methods to
produce high-resolution application scaling analyses.
This method requires no manual program modification,
new hardware, or lengthy simulations, and captures the
impact of architecture, operating systems, threading
models, and inputs. We apply these techniques to a set
of parallel benchmarks, and, as an example, demonstrate
that when it comes to scaling, functions in an
application do not behave monolithically.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2013:SBS,
author = "Florin Ciucu and Felix Poloczek and Jens Schmitt",
title = "Sharp bounds in stochastic network calculus",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "367--368",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465746",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The practicality of the stochastic network calculus
(SNC) is often questioned on grounds of potential
looseness of its performance bounds. In this paper it
is uncovered that for bursty arrival processes
(specifically Markov-Modulated On-Off (MMOO)), whose
amenability to per-flow analysis is typically
proclaimed as a highlight of SNC, the bounds can
unfortunately indeed be very loose (e.g., by several
orders of magnitude off). In response to this uncovered
weakness of SNC, the (Standard) per-flow bounds are
herein improved by deriving a general sample-path
bound, using martingale based techniques, which
accommodates FIFO, SP, and EDF scheduling disciplines.
The obtained (Martingale) bounds capture an additional
exponential decay factor of $ O(e^{- \alpha n}) $ in the
number of flows $n$, and are remarkably accurate even
in multiplexing scenarios with few flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2013:SSU,
author = "Ji Zhu and Stratis Ioannidis and Nidhi Hegde and
Laurent Massoulie",
title = "Stable and scalable universal swarms",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "369--370",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Hajek and Zhu recently showed that the BitTorrent
protocol can become unstable when peers depart
immediately after downloading all pieces of a file. In
light of this result, Zhou et al. propose bundling
swarms together, allowing peers to exchange pieces
across different swarms, and claim that such
``universal swarms'' can increase BitTorrent's
stability region. In this work, we formally
characterize the stability region of universal swarms
and show that they indeed exhibit excellent stability
properties. In particular, bundling allows a single
seed with limited upload capacity to serve an arbitrary
number of disjoint swarms if the arrival rate of peers
in each swarm is lower than the seed upload capacity.
Our result also shows that the stability region is
insensitive to peers' upload capacity, piece selection
policies and number of swarms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Paschos:2013:SSP,
author = "Georgios S. Paschos and Leandros Tassiulas",
title = "Sustainability of service provisioning systems under
attack",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "371--372",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465747",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a resource allocation model that captures
the interaction between legitimate users of a
distributed service provisioning system with malicious
intruders attempting to disrupt its operation. The
system consists of a bank of servers providing service
to incoming requests. Malicious intruders generate fake
traffic to the servers attempting to degrade service
provisioning. Legitimate traffic may be balanced using
available mechanisms in order to mitigate the damage
from the attack. We characterize the guaranteed region,
i.e. the set of legitimate traffic intensities that are
sustainable given specific intensities of the fake
traffic, under the assumption that the fake traffic is
routed using static policies. This assumption will be
relaxed, allowing arbitrary routing policies, in the
full version of this work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2013:TAW,
author = "Hong Xu and Chen Feng and Baochun Li",
title = "Temperature aware workload management in
geo-distributed datacenters",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "373--374",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Datacenters consume an enormous amount of energy with
significant financial and environmental costs. For
geo-distributed datacenters, a workload management
approach that routes user requests to locations with
cheaper and cleaner electricity has been shown to be
promising lately. We consider two key aspects that have
not been explored in this approach. First, through
empirical studies, we find that the energy efficiency
of the cooling system depends directly on the ambient
temperature, which exhibits a significant degree of
geographical diversity. Temperature diversity can be
used by workload management to reduce the overall
cooling energy overhead. Second, energy consumption
comes from not only interactive workloads driven by
user requests, but also delay-tolerant batch workloads
that run at the back-end. The elastic nature of batch
workloads can be exploited to further reduce the energy
cost. In this work, we propose to make workload
management for geo-distributed datacenters temperature
aware. We formulate the problem as a joint optimization
of request routing for interactive workloads and
capacity allocation for batch workloads. We develop a
distributed algorithm based on an m-block alternating
direction method of multipliers (ADMM) algorithm that
extends the classical 2-block algorithm. We prove the
convergence and rate of convergence results under
general assumptions. Trace-driven simulations
demonstrate that our approach is able to provide
5\%--20\% overall cost savings for geo-distributed
datacenters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2013:TPH,
author = "Ming Li and Andrey Lukyanenko and Sasu Tarkoma and
Yong Cui and Antti Yl{\"a}-J{\"a}{\"a}ski",
title = "Tolerating path heterogeneity in multipath {TCP} with
bounded receive buffers",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "375--376",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465750",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dai:2013:UAC,
author = "Chen Dai and Chao Lv and Jiaxin Li and Weihua Zhang
and Binyu Zang",
title = "Understanding architectural characteristics of
multimedia retrieval workloads",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "377--378",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balachandran:2013:UIV,
author = "Athula Balachandran and Vyas Sekar and Aditya Akella
and Srinivasan Seshan",
title = "Understanding {Internet} video viewing behavior in the
wild",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "379--380",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Over the past few years video viewership over the
Internet has risen dramatically and market predictions
suggest that video will account for more than 50\% of
the traffic over the Internet in the next few years.
Unfortunately, there have been signs that the Content
Delivery Network (CDN) infrastructure is being stressed
with the increasing video viewership load. Our goal in
this paper is to provide a first step towards a
principled understanding of how the content delivery
infrastructure must be designed and provisioned to
handle the increasing workload by analyzing video
viewing behaviors and patterns in the wild. We analyze
various viewing behaviors using a dataset consisting of
over 30 million video sessions spanning two months of
viewership from two large Internet video providers. In
these preliminary results, we observe viewing patterns
that have significant impact on the design of the video
delivery infrastructure.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2013:USS,
author = "Nan Jiang and Yu Jin and Ann Skudlark and Zhi-Li
Zhang",
title = "Understanding {SMS} spam in a large cellular network",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "381--382",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465530",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we conduct a comprehensive study of SMS
spam in a large cellular network in the US. Using one
year of user reported spam messages to the network
carrier, we devise text clustering techniques to group
associated spam messages in order to identify SMS spam
campaigns and spam activities. Our analysis shows that
spam campaigns can last for months and have a wide
impact on the cellular network. Combining with SMS
network records collected during the same time, we find
that spam numbers within the same activity often
exhibit strong similarity in terms of their sending
patterns, tenure and geolocations. Our analysis sheds
light on the intentions and strategies of SMS spammers
and provides unique insights in developing better
methods for detecting SMS spam.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sundaresan:2013:WPB,
author = "Srikanth Sundaresan and Nazanin Magharei and Nick
Feamster and Renata Teixeira and Sam Crawford",
title = "{Web} performance bottlenecks in broadband access
networks",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "383--384",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465745",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the first large-scale analysis of Web
performance bottlenecks as measured from broadband
access networks, using data collected from extensive
home router deployments. We analyze the limits of
throughput on improving Web performance and identify
the contribution of critical factors such as DNS
lookups and TCP connection establishment to Web page
load times. We find that, as broadband speeds continue
to increase, other factors such as TCP connection setup
time, server response time, and network latency are
often dominant performance bottlenecks. Thus, realizing
a ``faster Web'' requires not only higher download
throughput, but also optimizations to reduce both
client and server-side latency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aguilera:2013:TGR,
author = "Marcos K. Aguilera",
title = "Tutorial on geo-replication in data center
applications",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "385--386",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2465768",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Data center applications increasingly require a
geo-replicated storage system, that is, a storage
system replicated across many geographic locations.
Geo-replication can reduce access latency, improve
availability, and provide disaster tolerance. It turns
out there are many techniques for geo-replication with
different trade-offs. In this tutorial, we give an
overview of these techniques, organized according to
two orthogonal dimensions: level of synchrony
(synchronous and asynchronous) and type of storage
service (read-write, state machine, transaction). We
explain the basic idea of these techniques, together
with their applicability and trade-offs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nair:2013:FHT,
author = "Jayakrishnan Nair and Adam Wierman and Bert Zwart",
title = "The fundamentals of heavy-tails: properties,
emergence, and identification",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "387--388",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2466587",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Heavy-tails are a continual source of excitement and
confusion across disciplines as they are repeatedly
``discovered'' in new contexts. This is especially true
within computer systems, where heavy-tails seemingly
pop up everywhere --- from degree distributions in the
internet and social networks to file sizes and
interarrival times of workloads. However, despite
nearly a decade of work on heavy-tails, they are still
treated as mysterious, surprising, and even
controversial. The goal of this tutorial is to show
that heavy-tailed distributions need not be mysterious
and should not be surprising or controversial. In
particular, we will demystify heavy-tailed
distributions by showing how to reason formally about
their counter-intuitive properties; we will highlight
that their emergence should be expected (not
surprising) by showing that a wide variety of general
processes lead to heavy-tailed distributions; and we
will highlight that most of the controversy surrounding
heavy-tails is the result of bad statistics, and can be
avoided by using the proper tools.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schindler:2013:PAP,
author = "Jiri Schindler",
title = "Profiling and analyzing the {I/O} performance of
{NoSQL DBs}",
journal = j-SIGMETRICS,
volume = "41",
number = "1",
pages = "389--390",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2494232.2479782",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:09:59 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The advent of the so-called NoSQL databases has
brought about a new model of using storage systems.
While traditional relational database systems took
advantage of features offered by centrally-managed,
enterprise-class storage arrays, the new generation of
database systems with weaker data consistency models is
content with using and managing locally attached
individual storage devices and providing data
reliability and availability through high-level
software features and protocols. This tutorial aims to
review the architecture of selected NoSQL DBs to lay
the foundations for understanding how these new DB
systems behave. In particular, it focuses on how
(in)efficiently these new systems use I/O and other
resources to accomplish their work. The tutorial
examines the behavior of several NoSQL DBs with an
emphasis on Cassandra --- a popular NoSQL DB system. It
uses I/O traces and resource utilization profiles
captured in private cloud deployments that use both
dedicated directly attached storage as well as shared
networked storage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gao:2013:SOC,
author = "X. Gao and y. Lu and M. Sharma and M. S. Squillante
and J. W. Bosman",
title = "Stochastic optimal control for a general class of
dynamic resource allocation problems",
journal = j-SIGMETRICS,
volume = "41",
number = "2",
pages = "3--14",
month = sep,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2518025.2518027",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:07 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a general class of dynamic resource
allocation problems within a stochastic optimal control
framework. This class of problems arises in a wide
variety of applications, each of which intrinsically
involves resources of different types and demand with
uncertainty and/or variability. The goal is to
dynamically allocate capacity for every resource type
in order to serve the uncertain/variable demand and
maximize the expected net-benefit over a time horizon
of interest based on the rewards and costs associated
with the different resources. We derive the optimal
control policy within a singular control setting, which
includes easily implementable algorithms for governing
the dynamic adjustments to resource allocation
capacities over time. Numerical experiments investigate
various issues of both theoretical and practical
interest, quantifying the significant benefits of our
approach over alternative optimization approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jelenkovic:2013:RCC,
author = "Predrag R. Jelenkovi{\'c} and Evangelia D. Skiani",
title = "Retransmissions over correlated channels",
journal = j-SIGMETRICS,
volume = "41",
number = "2",
pages = "15--25",
month = sep,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2518025.2518028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:07 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Frequent failures characterize many existing
communication networks, e.g. wireless ad-hoc networks,
where retransmission-based failure recovery represents
a primary approach for successful data delivery. Recent
work has shown that retransmissions can cause power law
delays and instabilities even if all traffic and
network characteristics are super-exponential. While
the prior studies have considered an independent
channel model, in this paper we extend the analysis to
the practically important dependent case. We use
modulated processes, e.g. Markov modulated, to capture
the channel dependencies. We study the number of
retransmissions and delays when the hazard functions of
the distributions of data sizes and channel statistics
are proportional, conditionally on the channel state.
Our results show that the tails of the retransmission
and delay distributions are asymptotically insensitive
to the channel correlations and are determined by the
state that generates the lightest asymptotics. This
insight is beneficial both for capacity planning and
channel modeling since we do not need to account for
the correlation details. However, these results may be
overly optimistic when the best state is infrequent,
since the effects of 'bad' states may persist long
enough to degrade the expected performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mahmud:2013:OCP,
author = "A. Hasan Mahmud and Shaolei Ren",
title = "Online capacity provisioning for carbon-neutral data
center with demand-responsive electricity prices",
journal = j-SIGMETRICS,
volume = "41",
number = "2",
pages = "26--37",
month = sep,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2518025.2518029",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:07 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the huge electricity consumption and carbon
emissions, data center operators have been increasingly
pressured to reduce their net carbon footprints to
zero, i.e., carbon neutrality. In this paper, we
propose an efficient online algorithm, called CNDC
(optimization for Carbon-Neutral Data Center), to
control the number of active servers for minimizing the
data center operational cost (defined as a weighted sum
of electricity cost and delay cost) while satisfying
carbon neutrality without requiring long-term future
information. Unlike prior research on carbon
neutrality, we explore demand-responsive electricity
price enabled by the emerging smart grid technology and
demonstrate that it can be incorporated in data center
operation to reduce the operational cost. Leveraging
the Lyapunov optimization technique, we prove that CNDC
achieves a close-to-minimum operational cost compared
to the optimal algorithm with future information, while
bounding the potential violation of carbon neutrality,
in an almost arbitrarily random environment. We also
perform trace-based simulations as well as experimental
studies to complement the analysis. The results show
that CNDC reduces the cost by more than 20\% (compared
to the state-of-the-art prediction-based algorithm) while
resulting in a smaller carbon footprint. Moreover, by
incorporating demand-response electricity prices, CNDC
can further decrease the average cost by approximately
2.5\%, translating into hundreds of thousands of
dollars per year.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Asghari:2013:OEM,
author = "Naser M. Asghari and Michel Mandjes and Anwar Walid",
title = "Optimizing energy management in multi-core servers",
journal = j-SIGMETRICS,
volume = "41",
number = "2",
pages = "38--40",
month = sep,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2518025.2518031",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:07 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we develop techniques for analyzing and
optimizing energy management in multi-core servers with
speed scaling capabilities. Our framework incorporates
the processor's dynamic power, and it also accounts for
other intricate and important power features such as
the static (leakage) power and switching overhead
between speed levels. Using stochastic fluid models to
capture traffic burst dynamics, we propose and study
different strategies for adapting the multi-core server
speeds based on the observable buffer content, so as to
optimize objective functions that balance energy
consumption and performance. It is shown that, for a
reasonable switching overhead and a small number of
thresholds, a substantial efficiency gain is achieved.
In addition, the optimal power consumptions of the
different strategies are hardly sensitive to
perturbations in the input parameters, so that the
performance is robust to misspecifications of the
system's input traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bekker:2013:SPS,
author = "R. Bekker and J. L. Dorsman and R. D. van der Mei and
P. Vis and E. M. M. Winands",
title = "Scheduling in polling systems in heavy traffic",
journal = j-SIGMETRICS,
volume = "41",
number = "2",
pages = "41--43",
month = sep,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2518025.2518032",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:07 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the classical cyclic polling model with
Poisson arrivals and with gated service at all queues,
but where the local scheduling policies are not
necessarily First-Come-First-Served (FCFS). More
precisely, we study the waiting-time performance of
polling models where the local service order is
Last-Come-First-Served (LCFS), Random-Order-of-Service
(ROS) or Processor Sharing (PS). Under heavy-traffic
conditions the waiting times turn out to converge to
products of generalized trapezoidal distributions and a
gamma distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casas:2013:YSL,
author = "Pedro Casas and Michael Seufert and Raimund Schatz",
title = "{YOUQMON}: a system for on-line monitoring of {YouTube
QoE} in operational {3G} networks",
journal = j-SIGMETRICS,
volume = "41",
number = "2",
pages = "44--46",
month = sep,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2518025.2518033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:07 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "YouTube is changing the way operators manage network
performance monitoring. In this paper we introduce
YOUQMON, a novel on-line monitoring system for
assessing the Quality of Experience (QoE) undergone by
HSPA/3G customers watching YouTube videos, using
network-layer measurements only. YOUQMON combines
passive traffic analysis techniques to detect stalling
events in YouTube video streams, with a QoE model to
map stallings into a Mean Opinion Score reflecting the
end-user experience. We evaluate the stalling detection
performance of YOUQMON with hundreds of YouTube video
streams, and present results showing the feasibility of
performing real-time YouTube QoE monitoring in an
operational mobile broadband network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dorsman:2013:PQN,
author = "Jan-Pieter Dorsman and Maria Vlasiou and Bert Zwart",
title = "Parallel queueing networks with {Markov-modulated}
service speeds in heavy traffic",
journal = j-SIGMETRICS,
volume = "41",
number = "2",
pages = "47--49",
month = sep,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2518025.2518034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:07 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a network of parallel single-server queues,
where the service speeds are governed by a
continuous-time Markov chain. This generic model finds
applications in many areas such as communication
systems, computer systems and manufacturing systems. We
obtain heavy-traffic approximations for the joint
workload, delay and queue length processes by combining
a functional central limit theorem approach with
matrix-analytic methods. In addition, we numerically
compute the joint distributions by viewing the limit
processes as semi-martingale reflected Brownian
motions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fiems:2013:SRE,
author = "Dieter Fiems and Koen {De Turck}",
title = "Spreading rumours in {Euclidean} space",
journal = j-SIGMETRICS,
volume = "41",
number = "2",
pages = "50--52",
month = sep,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2518025.2518035",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:07 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers the process of spreading rumours
in Euclidean space. The rumour (or epidemic) process
under study includes (i) a discrete-time stochastic
arrival process of new spatially distributed rumours
and (ii) a stochastic process of linear transformations
of the current rumours which allows for modelling
mobility as well as dispersion of rumours. Inspired by
linear filtering theory, an affine stochastic recursion
in characteristic-function space is studied and
numerical transform inversion is used to study the
distribution of rumours in space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vatamidou:2013:CPT,
author = "Eleni Vatamidou and Ivo Adan and Maria Vlasiou and
Bert Zwart",
title = "Corrected phase-type approximations for the workload
of the {MAP/G/1} queue with heavy-tailed service
times",
journal = j-SIGMETRICS,
volume = "41",
number = "2",
pages = "53--55",
month = sep,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2518025.2518036",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:07 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In many applications, significant correlations between
arrivals of load-generating events make the numerical
evaluation of the load of a system a challenging
problem. Here, we construct very accurate
approximations of the workload distribution of the
MAP/G/1 queue that capture the tail behavior of the
exact workload distribution and provide a small
relative error. Motivated by statistical analysis, we
assume that the service times are a mixture of a
phase-type and a heavy-tailed distribution. With the
aid of perturbation analysis, we derive our
approximations as a sum of the workload distribution of
the MAP/PH/1 queue and a heavy-tailed component that
depends on the perturbation parameter. We refer to our
approximations as corrected phase-type approximations,
and we exhibit their performance with a numerical
study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Koziolek:2013:TSP,
author = "Heiko Koziolek and Steffen Becker and Jens Happe and
Petr Tuma and Thijmen de Gooijer",
title = "Towards software performance engineering for multicore
and manycore systems",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "2--11",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567531",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the era of multicore and manycore processors, a
systematic engineering approach for software
performance becomes more and more crucial to the
success of modern software systems. This article argues
for more software performance engineering research
specifically for multicore and manycore systems, which
will have a profound impact on software engineering
practices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2013:AGD,
author = "Eitan Bachmat and Ilan Elhanan",
title = "Analysis of the {GSTF} disk scheduling algorithm",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "13--15",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567533",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2013:JOO,
author = "Minghong Lin and Li Zhang and Adam Wierman and Jian
Tan",
title = "Joint optimization of overlapping phases in
{MapReduce}",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "16--18",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "MapReduce is a scalable parallel computing framework
for big data processing. It exhibits multiple
processing phases, and thus an efficient job scheduling
mechanism is crucial for ensuring efficient resource
utilization. This work studies the scheduling challenge
that results from the overlapping of the ``map'' and
``shuffle'' phases in MapReduce. We propose a new,
general model for this scheduling problem. Further, we
prove that scheduling to minimize average response time
in this model is strongly NP-hard in the offline case
and that no online algorithm can be
constant-competitive in the online case. However, we
provide two online algorithms that match the
performance of the offline optimal when given a
slightly faster service rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghaderi:2013:RAW,
author = "Javad Ghaderi and Sem Borst and Phil Whiting",
title = "Random access in wireless networks: how much
aggressiveness can cause instability?",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "19--21",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567535",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Random access schemes are simple and inherently
distributed, yet capable of matching the optimal
throughput performance of centralized scheduling
algorithms. The throughput optimality however has been
established for activation rules that are relatively
sluggish, and may yield excessive queues and delays.
More aggressive/persistent access schemes have the
potential to improve the delay performance, but it is
not clear if they can offer any universal throughput
optimality guarantees. In this paper, we identify a
limit on the aggressiveness of nodes, beyond which
instability is bound to occur in a broad class of
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Adan:2013:QSB,
author = "Ivo Adan and Marko Boon and Ana Busi{\'c} and Jean
Mairesse and Gideon Weiss",
title = "Queues with skill based parallel servers and a {FCFS}
infinite matching model",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "22--24",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feinber:2013:DPO,
author = "Eugene A. Feinber and Fenghsu Yang",
title = "Dynamic price optimization for an {M/M/k/N} queue with
several customer types",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "25--27",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Urgaonkar:2013:PSC,
author = "Bhuvan Urgaonkar and George Kesidis and Uday V.
Shanbhag and Cheng Wang",
title = "Pricing of service in clouds: optimal response and
strategic interactions",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "28--30",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lim:2013:PTM,
author = "Sungsu Lim and Kyomin Jung and John C. S. Lui",
title = "Phase transition of multi-state diffusion process in
networks",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "31--33",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Antunes:2013:PMG,
author = "Nelson Antunes and Gon{\c{c}}alo Jacinto and
Ant{\'o}nio Pacheco",
title = "Probing a {M/G/1} queue with general input and service
times",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "34--36",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567540",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the estimation of the arrival rate and the
service time moments of a M/G/1 queue with probing,
i.e., with special customers (probes) entering the
system. The probe inter-arrival times are i.i.d. and
probe service times follow a general positive
distribution. The only observations used are the
arrival times, service times and departure times of
probes. We derive the main equations from which the
quantities of interest can be estimated. Two particular
probe arrivals, deterministic and Poisson, are
investigated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harrison:2013:STD,
author = "Peter Harrison",
title = "Sojourn time distributions in tandem batch-networks",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "37--39",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The joint probability distribution of a customer's
sojourn times in passing through a tandem pair of
geometric batch-queues is obtained as a
Laplace--Stieltjes transform (LST). The results
obtained relate to a customer that passes, within a
full batch, between the queues rather than being
discarded in a partial batch.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spencer:2013:QFI,
author = "Joel Spencer and Madhu Sudan and Kuang Xu",
title = "Queueing with future information",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "40--42",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study an admissions control problem, where a queue
with service rate $ 1 - p $ receives incoming jobs at
rate $ \lambda \in (1 - p, 1) $, and the decision maker
is allowed to redirect away jobs up to a rate of $p$,
with the objective of minimizing the time-average queue
length. We show that the amount of information about
the future has a significant impact on system
performance, in the heavy-traffic regime. When the
future is unknown, the optimal average queue length
diverges at rate $ \log_{1 / (1 - p)} 1 / (1 - \lambda)
$, as $ \lambda \to 1 $. In sharp contrast, when all
future arrival and service times are revealed
beforehand, the optimal average queue length converges
to a finite constant, $ (1 - p) / p $, as $ \lambda \to
1 $. We further show that the finite limit of $ (1 - p)
/ p $ can be achieved using only a finite lookahead
window starting from the current time frame, whose
length scales as $ O(\log (1 / (1 - \lambda))) $, as $
\lambda \to 1 $. This leads to the conjecture of an
interesting duality between queuing delay and the
amount of information about the future.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Poloczek:2013:MEA,
author = "Felix Poloczek and Florin Ciucu",
title = "A martingale-envelope and applications",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "43--45",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the framework of stochastic network calculus we
present a new envelope-based approach which uses
martingales to characterize a queueing system. We show
that this setting allows a simple handling of
multiplexing and scheduling: whereas multiplexing of
several sources results in multiplication of the
corresponding martingales, per-flow analysis in a
scheduled system can be done by shifting the
martingales to a certain point in time. Applying this
calculus to Markov Arrival Processes, it is shown that
the performance bounds can become reasonably tight.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kang:2013:FAM,
author = "Weining Kang and Hongyuan Lu and Guodong Pang",
title = "Fluid approximations of many-server queues with
delayed feedback and heterogeneous service and patience
times",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "46--48",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider many-server queues with delayed feedback
where the service (patience) times of new customers and
feedback customers are differentiated, and new and
feedback customers are served under the
first-come-first-serve (FCFS) discipline in the service
station. The arrival process, service, patience and
delay times are all general and mutually independent. A
two-parameter fluid model for the system dynamics in
the many-server regime is investigated, where we use
four two-parameter processes to describe the service
and queueing processes of the new and feedback
customers, two for the service dynamics and two for the
queueing dynamics. When the arrival rate is constant,
we derive the steady state performance measures and
study the impact of impatience differentiation and
service differentiation upon them. When the arrival
rate is time-varying, we provide an algorithm to
compute the fluid processes. Numerical experiments are
conducted, and show that the algorithm is very
effective, compared with simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dieker:2013:DAL,
author = "A. B. Dieker and T. Suk",
title = "Diffusion approximations for large-scale buffered
systems",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "49--51",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2013:ACR,
author = "Yingdong Lu and Mark S. Squillante and David D. Yao",
title = "Asymptotics of a class of resource planning problems",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "52--54",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ye:2013:ILH,
author = "Heng-Qing Ye and David D. Yao",
title = "Interchange of limits in heavy traffic analysis under
a moment condition",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "55--57",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop an approach to prove the interchange of
limits in heavy traffic analysis of stochastic
processing networks, using a moment condition on the
primitive data, the interarrival and service times. The
approach complements the one in [8], where a bounded
workload condition is required instead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:2013:PPU,
author = "Edward G. Coffman and Petar Momcilovi{\'c}",
title = "A particle process underlying {SSD} storage
structures",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "58--60",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a particle process that models the
evolution of page configurations in solid-state-drive
(SSD) storage devices. These devices use integrated
circuitry as memory to store data. Typically, pages
(units of storage) are organized into blocks of a given
size. Three operations are permitted: write, read, and
clean. Rewrites are not allowed, i.e., a page has to be
``cleaned'' before the write operation can be repeated.
While the read and write operations are permitted on
individual pages, the clean operation can be executed
on whole blocks only. Analysis of our particle process
captures a key tradeoff in the operation of SSDs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gao:2013:RCF,
author = "X. Gao and Y. Lu and M. Sharma and M. S. Squillante
and J. W. Bosman",
title = "Rewards, costs and flexibility in dynamic resource
allocation: a stochastic optimal control approach",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "61--63",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567549",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Temple:2013:MMP,
author = "William G. Temple and Richard T. B. Ma",
title = "Monotonic marginal pricing: demand response with price
certainty",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "65--70",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we develop a general dynamic pricing
scheme based on consumer-indexed marginal cost, and
demonstrate its properties in a simulated electricity
market derived from New York ISO data. We show that
monotonic marginal (MM) pricing provides price
certainty, ensuring that every consumer's instantaneous
price is non-increasing for a constant consumption
level. Additionally, we show that MM pricing ensures
budget balance for energy suppliers, allowing them to
recover any operating costs and a profit margin. Using
a Summer 2012 peak load day as a case study, we
simulate a population of over 25000 electricity users
and evaluate the performance of an example MM pricing
plan versus historical real-time prices under various
demand elasticities. The results demonstrate that MM
pricing can provide system-level demand response and
cost savings comparable with real-time pricing, while
protecting consumers from price volatility.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singla:2013:BPS,
author = "Sahil Singla and Yashar Ghiassi-Farrokhfal and
Srinivasan Keshav",
title = "Battery provisioning and scheduling for a hybrid
battery-diesel generator system",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "71--76",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Diesel generators (gensets) are commonly used to
provide a reliable source of electricity in off-grid
locations. Operating a genset is expensive both in
terms of fuel and carbon footprint. Because genset
efficiency increases with offered load, this expense
can be reduced by using a storage battery to ensure
that a genset always runs at full load, charging and
discharging the battery as necessary. However, the cost
of batteries requires us to size them parsimoniously
and operate them efficiently. We, therefore, study the
problem of provisioning and optimally managing a
battery in a hybrid battery-genset system. To aid in
sizing a battery, we analytically study the trade-off
between battery size and carbon footprint. We also
formulate the optimal scheduling of battery charging
and discharging as a mixed-integer program, proving
that it is NP-hard. We then propose a heuristic online
battery scheduling scheme that we call alternate
scheduling and prove that it has a competitive ratio of
$ k_1 G / C + k_2 T_u / k_1 + k_2 T_u $ with respect to
the offline optimal scheduling, where $G$ is the genset
capacity, $C$ is the battery charging rate, $ k_1 $, $
k_2 $ are genset-specific constants, and $ T_u $ is the
duration of a time step. We numerically demonstrate
that alternate scheduling is near-optimal for four
selected industrial loads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gan:2013:RTD,
author = "Lingwen Gan and Adam Wierman and Ufuk Topcu and
Niangjun Chen and Steven H. Low",
title = "Real-time deferrable load control: handling the
uncertainties of renewable generation",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "77--79",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Real-time demand response is potential to handle the
uncertainties of renewable generation. It is expected
that a large number of deferrable loads, including
electric vehicles and smart appliances, will
participate in demand response in the future. In this
paper, we propose a decentralized algorithm that
reduces the tracking error between demand and
generation, by shifting the power consumption of
deferrable loads to match the generation in real-time.
At each time step within the control window, the
algorithm minimizes the expected tracking error to go,
using updated predictions of demand and generation. It
is proved that the root-mean-square tracking error
vanishes as the control window expands, even in the
presence of prediction errors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2013:OCT,
author = "Kai Yang and Anwar Walid",
title = "Outage-capacity tradeoff for smart grid with
renewables",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "80--82",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Future power grid systems are envisioned to be
integrated with many distributed renewable energy
sources (DRES). Energy storage is the key technology to
enable reliable and cost-effective renewable energy.
Given the fact that large-scale energy storage devices
are usually costly to install and operate, we are
naturally led to the following question: How much
storage is needed to guarantee the stability of a power
grid network with DRESs? This paper represents a first
step in systematically exploring the tradeoff between
the capacity of energy storage devices and the outage
probability, i.e., the probability of the occurrence of
imbalance between the supply and demand. We first
propose a secure scheduling and dispatch (SSD)
algorithm that is capable of maintaining the grid
stability in the presence of volatility in the power
generation. We then derive a closed-form bound to
quantify the tradeoff between the storage capacity and
the outage probability. Under mild assumptions, we
reveal that the outage probability decreases
exponentially with respect to the square of the storage
capacity. This finding implies that energy storage is
an effective and economically viable solution to
maintain the stability of a smart grid network even in
the presence of many volatile and intermittent
renewable energy sources. The impact of correlation in
energy generation on the stability of a smart grid
network is also investigated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chan:2013:CVI,
author = "Christine S. Chan and Boxiang Pan and Kenny Gross and
Kalyan Vaidyanathan and Tajana Simuni{\'c} Rosing",
title = "Correcting vibration-induced performance degradation
in enterprise servers",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "83--88",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567555",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Server fan subsystems are power-hungry and generate
vibrations, degrading the performance of data-intensive
workloads and inflating the uptime electric bills of
cost-sensitive datacenters. In this paper, we
demonstrate a systematic server measurement methodology
to isolate different types of vibrations and quantify
their effect on hard disks. We introduce a thermal and
cooling management policy that mitigates vibrational
effects through workload scheduling and fan control, and study
the benefits of a hybrid storage array with solid-state
drives (SSDs) that are impervious to vibrations. We
achieve performance improvements of up to 73\% and
energy savings of up to 76\% over the state of the art,
while meeting thermal constraints and improving the
system's resilience to both internal and external
vibrations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2013:ESG,
author = "Hao Wang and Jianwei Huang and Xiaojun Lin and Hamed
Mohsenian-Rad",
title = "Exploring smart grid and data center interactions for
electric power load balancing",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "89--94",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The operation of a data center consumes a tremendous
amount of electricity, and the energy cost accounts for
a large portion of the data center's operation cost.
This has led to growing interest in reducing the
energy cost of data centers. One approach advocated in
recent studies is to distribute the computation
workload among multiple geographically dispersed data
centers by exploiting the electricity price
differences. However, the impact of load
redistributions on the power grid is not well
understood yet. This paper takes the first step towards
tackling this important issue, by studying how the
power grid can take advantage of the data center's load
distribution proactively for the purpose of power load
balancing. We model the interactions between power grid
and data centers as a two-stage problem, where the
power grid operator aims to balance the electric power
load in the first stage, and the data centers seek to
minimize their total energy cost in the second stage.
We show that this two-stage problem is a bilevel
program with an indefinite quadratic objective
function, which cannot be solved efficiently using
standard convex optimization algorithms. Therefore, we
reformulate this bilevel optimization problem as a
linear program with additional finite complementarity
slackness conditions, and propose a branch and bound
algorithm to attain the globally optimal solution. The
simulation results demonstrate that our proposed scheme
can improve the load balancing performance by around
12\% in terms of the electric load index and reduce the
energy cost of data centers by 46\% on average.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pervila:2013:HHU,
author = "Mikko Pervil{\"a} and Lassi Remes and Jussi
Kangasharju",
title = "Harvesting heat in an urban greenhouse",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "95--97",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This extended abstract summarizes the key technical
results from the authors' previous article [9]. It
describes the first eight months of operation of a
prototype rooftop greenhouse located in Helsinki,
Finland. This version adds experiences from another
five months, including the past winter. The greenhouse
is heated by exhaust heat harvested from a rack of
computer servers, while the servers are cooled by
unconditioned outside air only.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Widjaja:2013:SSE,
author = "Indra Widjaja and Anwar Walid and Yanbin Luo and Yang
Xu and H. Jonathan Chao",
title = "Switch sizing for energy-efficient datacenter
networks",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "98--100",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Saving power in datacenter networks has become a
pressing issue. ElasticTree and CARPO fat-tree networks
have recently been proposed to reduce power consumption
by using sleep mode during the operation stage of the
network. In this paper, we address the design stage
where the right switch size is evaluated to maximize
power saving during the expected operation of the
network. Our findings reveal that deploying a large
number of small switches is more power-efficient than a
small number of large switches when the traffic demand
is relatively moderate or when servers exchanging
traffic are in close proximity. We also discuss the
impact of sleep mode on performance such as packet
delay and loss.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hou:2013:HHE,
author = "Chenying Hou and Fa Zhang and Antonio Fern{\'a}ndez
Anta and Lin Wang and Zhiyong Liu",
title = "A hop-by-hop energy efficient distributed routing
scheme",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "101--106",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567559",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Energy inefficiencies in current networks provide both
challenges and opportunities for energy saving.
Recently, many works have focused on minimizing energy
cost from the routing perspective. However, most
existing works view these as optimization problems and
solve them in a centralized manner, such as with a
solver or using approximations. In this paper, we focus
on a network-wide bi-objective optimization problem,
which simultaneously minimizes the total energy
consumption using speed scaling and the total traffic
delay. We propose a hop-by-hop dynamic distributed
routing scheme for the implementation of this
network-wide optimization problem. Our scheme is more
practical to realize in large distributed networks
compared with current centralized energy minimization
methods. We can also achieve near-global optimality in a
distributed manner, which commonly used shortest-path
routing protocols such as OSPF cannot. Our routing
scheme is fully distributed and maintains loop-free
routes at every instant. Simulations conducted on real
data sets show that the distributed
loop-free routing scheme converges to near Pareto
optimal values. Also, our method outperforms the widely
applied shortest path routing strategy by 30\% in
energy saving.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2013:JVM,
author = "Lin Wang and Fa Zhang and Athanasios V. Vasilakos and
Chenying Hou and Zhiyong Liu",
title = "Joint virtual machine assignment and traffic
engineering for green data center networks",
journal = j-SIGMETRICS,
volume = "41",
number = "3",
pages = "107--112",
month = dec,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2567529.2567560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 28 06:10:08 MST 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
abstract = "The popularization of cloud computing brings emergency
concern to the energy consumption in big data centers.
Besides the servers, the energy consumed by the network
in a data center is also considerable. Existing works
for improving the network energy efficiency are mainly
focused on traffic engineering, i.e., consolidating
flows and switching off unnecessary devices, which
fails to comprehensively consider the unique features
in data centers. In this paper, we advocate a joint
optimization for achieving energy efficiency of data
center networks by proposing a unified optimization
framework. In this framework, we take advantage of the
application characteristics and topology features, and
integrate virtual machine assignment and traffic
engineering. Under this
framework, we then devise two efficient algorithms, TE
VMA and TER, for assigning virtual machines and routing
traffic flows respectively. Knowing the communication
patterns of the applications, the TE VMA algorithm is
purposeful and can generate desirable traffic
conditions for the next-step routing optimization. The
TER algorithm makes full use of the hierarchical
feature of the topology and is conducted on the
multipath routing protocol. The performance of the
overall framework is confirmed by both theoretical
analysis and simulation results, where up to 50\% total
energy savings can be achieved, 20\% more than
traffic-engineering-only approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Loiseau:2014:MSG,
author = "Patrick Loiseau and David C. Parkes and Jean Walrand",
title = "{MultiDefender} security games on networks",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "4--7",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stackelberg security game models and associated
computational tools have seen deployment in a number of
high-consequence security settings, such as LAX canine
patrols and the Federal Air Marshal Service. This
deployment across essentially independent agencies
raises a natural question: what global impact does the
resulting strategic interaction among the defenders,
each using a similar model, have? We address this
question in two ways. First, we demonstrate that the
most common solution concept of Strong Stackelberg
equilibrium (SSE) can result in significant
under-investment in security entirely because SSE
presupposes a single defender. Second, we propose a
framework based on a different solution concept which
incorporates a model of interdependencies among
targets, and show that in this framework defenders tend
to over-defend, even under significant positive
externalities of increased defense.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Laszka:2014:QAO,
author = "Aron Laszka and Assane Gueye",
title = "Quantifying all-to-one network topology robustness
under budget constraints",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "8--11",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To design robust network topologies that resist
strategic attacks, one must first be able to quantify
robustness. In a recent line of research, the theory of
network blocking games has been used to derive
robustness metrics for topologies. However, these
previous works did not consider the budget constraints
of the network operator. In this paper, we introduce a
budget limit on the operator and study two budget
constraint formulations: the maximum and the expected
cost constraints. For practical applications, the
greatest challenge posed by blocking games is their
computational complexity. Therefore, we show that the
expected cost constraint formulation leads to games
that can be solved efficiently, while the maximum cost
constraint leads to NP-hard problems. As an
illustrative example, this paper discusses the
particular case of All-to-One (e.g., sensor or access)
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dritsoula:2014:GCE,
author = "Lemonia Dritsoula and John Musacchio",
title = "A game of clicks: economic incentives to fight click
fraud in ad networks",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "12--15",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ad networks use revenue sharing and effective
filtering of fraudulent clicks to attract publishers.
We develop a simple Hotelling competition-based
game-theoretic model to study the effect of competition
along these dimensions. We compute the Nash equilibrium
strategy for two ad networks that compete for
publishers. We then investigate how the preferences of
the publishers and the quality of the ad networks
affect the market share and the strategies chosen at
equilibrium.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kavurmacioglu:2014:DIP,
author = "Emir Kavurmacio{\u{g}}lu and Murat Alanyali and David
Starobinski",
title = "Demand-insensitive price relationships in competitive
private commons",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "16--19",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce an economic model for private commons
that consists of network providers serving a fixed
primary demand and making strategic pricing decisions
to improve their revenues by providing service to a
secondary demand. For general forms of secondary
demand, we establish the existence and uniqueness of
two critical prices for each provider: the break-even
price and the market sharing price. The former
determines service profitability, while the latter
determines a provider's willingness to share the
market. We further show that the market sharing price
is always greater than the break-even price, leading to
a price interval in which a provider is both profitable
and willing to share the market. Making use of these
results, we provide insight into the nature of market
outcomes (Nash equilibria) when two providers compete
to attract secondary demand: (i) if the market sharing
intervals of the two providers overlap, then the
providers end up sharing the market; (ii) else, the
provider with the lower break-even price captures the
entire market as the result of a price war.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Courcoubetis:2014:RMP,
author = "Costas Courcoubetis and Kostas Sdrolias and Richard
Weber",
title = "Revenue Models, Price Differentiation and Network
Neutrality Implications in the {Internet}",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "20--23",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627540",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Park:2014:ICR,
author = "Jiwon Park and Jeonghoon Mo",
title = "{ISP} and {CP} revenue sharing and content piracy",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "24--27",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the network neutrality debate, the revenue
sharing between Internet service providers(ISPs) and
content providers(CPs) has been received attentions. In
this paper, we study the revenue sharing of them from
the perspective of collaboration to reduce online
content piracy. With higher efforts of ISPs to reduce
illegal content traffics,CPs have higher incentives to
share their revenue with ISPs. We study the
possibilities of such collaboration with a game
theoretic model. Our preliminary results seem promising
as both ISPs and CPs can be benefited from the
cooperation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ifrach:2014:BSL,
author = "Bar Ifrach and Costis Maglaras and Marco Scarsini",
title = "{Bayesian} social learning with consumer reviews",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "28--28",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a market of heterogeneous customers who
rationally learn the mean quality of an offered product
by observing the reviews of customers who purchased the
product earlier in time. The seller, who is equally
uninformed about the quality, prices dynamically to
maximize her revenue. We find that social learning is
successful: agents eventually learn the mean quality of
the product. This result holds for an information
structure in which the sequence of past reviews and
prices is observed, and, under some assumptions, even
when only aggregate reviews are observed. The latter
result hinges on the observation that earlier reviews
are more influential than later ones. In addition, we
find that under general conditions the seller benefits
from social learning ex ante, before knowing the
quality of her product. Finally, we draw conclusions on
the seller's pricing problem when accounting for social
learning. Under some assumptions, we find that lowering
the price speeds social learning, in contrast with
earlier results on social learning from privately
observed signals.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dahleh:2014:CLI,
author = "Munther Dahleh and Alireza TahbazSalehi and John N.
Tsitsiklis and Spyros I. Zoumpoulis",
title = "Coordination with local information",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "29--29",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "How can the details of who observes what affect the
outcomes of economic, social, and political
interactions? Our thesis is that outcomes do not depend
merely on the status quo and the available (noisy)
information on it; they also crucially depend on how
the available pieces of information are allocated among
strategic agents. We study the dependence of
coordination outcomes on local information sharing. In
the economic literature, common knowledge of the
fundamentals leads to the standard case of multiple
equilibria due to the self-fulfilling nature of agents'
beliefs. The global-games framework has been used
extensively as a toolkit for arriving at a unique
equilibrium selection in the context of coordination
games, which can model bank runs, currency attacks, and
social uprisings, among others. Yet, there is a natural
mechanism through which multiplicity can reemerge,
while keeping information solely exogenous: the
(exogenous) information structure per se, namely, the
details of who observes what noisy observation. The aim
of this paper is to understand the role of the
exogenous information structure in the determination
and characterization of equilibria in the coordination
game. We answer the question of how the equilibria of
the coordination game depend on the details of local
information sharing. Our main contribution is to
provide conditions for uniqueness and multiplicity that
pertain solely to the details of information sharing.
The findings in the present paper give an immediate
answer as to the determinacy of equilibria using only
the characterization of which agent observes which pieces
of information. We build on the standard global game
framework for coordination games with incomplete and
asymmetric information and consider a coordination game
in which each of a collection of agents decides whether
to take a risky action (whose payoff depends on how
many agents made the same decision, and the
fundamentals) or a safe action, based on their noisy
observations regarding the fundamentals. Generalizing
away from the standard practice of considering only
private and public signals, we allow for signals that
are observed by arbitrary subsets of the agents. We
refer to signals that are neither private nor public as
local signals. We pose the following question: how do
the equilibria of the coordination game depend on the
information locality, i.e., on the details of local
information sharing? Our key finding is that the number
of equilibria is highly sensitive to the details of
information locality. As a result, a new dimension of
indeterminacy regarding the outcomes is
introduced: not only may the same fundamentals well
lead to different outcomes in different societies, due
to different realizations of the noisy observations;
the novel message of this work is that the same
realization of the noisy observations is compatible
with different equilibrium outcomes in societies with
different structures of local information sharing. In
particular, we show that as long as a collection of
agents share the same observations, and no other
agent's observations overlap with their common
observations, multiple equilibria arise. Identical
observations are not, however, a necessary
condition for multiplicity: we show that as long as the
observations of a collection of agents form a cascade
of containments, and no other agent's observations
overlap with the observations of the collection, then
multiplicity emerges. This is not to say however that
common knowledge of information at the local level
necessarily implies multiplicity: in particular, in the
absence of identical observations or cascade of
containments of observations, or if the condition of no
overlap of information is violated, then, despite the
presence of some signals that are common knowledge
between agents, a unique equilibrium may be selected.
In the case where each agent observes exactly one
signal, we characterize the set of equilibria as a
function of the details of the information structure.
We show how the distance between the largest and
smallest equilibria depends on how information is
locally shared among the agents. In particular, the
more equalized the sizes of the sets of agents who
observe the same signal, the more diverse the
information of each group becomes, heightening
inter-group strategic uncertainty, and leading to a
more refined set of equilibria. We use our
characterization to study the set of equilibria in
large coordination games. We show that as the number of
agents grows, the game exhibits a unique equilibrium if
and only if the largest set of agents with access to a
common signal grows sublinearly in the number of
agents, thus identifying a sharp threshold for
uniqueness versus multiplicity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abbassi:2014:DCC,
author = "Zeinab Abbassi and Nidhi Hegde and Laurent
Massouli{\'e}",
title = "Distributed content curation on the {Web}",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "30--33",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years there has been an explosive growth of
digital content in the form of news feeds, videos, and
original content, on online platforms such as blogs and
social networks. We consider the problem of curating
this vast catalogue of content such that aggregators or
publishers can offer readers content that is of
interest to them, with minimal spam. Under a
game-theoretic model we obtain several results on the
optimal content selection and on the efficiency of
distributed curation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2014:IDH,
author = "Jie Xu and Mihaela van der Schaar",
title = "Incentive design for heterogeneous user-generated
content networks",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "34--37",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper designs rating systems aimed at
incentivizing users in UGC networks to produce content,
thereby significantly improving the social welfare of
such networks. We explicitly consider that monitoring
users' production activities is imperfect. Such
imperfect monitoring will lead to undesired rating drop
of users, thereby reducing the social welfare of the
network. The network topology constraint and users'
heterogeneity further complicate the optimal rating
system design problem since users' incentives are
complexly coupled. This paper determines optimal
recommendation strategies under a variety of monitoring
scenarios. Our results suggest that, surprisingly,
allowing a certain level of free-riding behavior may
lead to higher social welfare than incentivizing all
users to produce.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2014:BLS,
author = "Chong Jiang and Carolyn L. Beck and R. Srikant",
title = "Bidding with limited statistical knowledge in online
auctions",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "38--41",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider online auctions from the point of view of
a single bidder who has an average budget constraint.
By modeling the rest of the bidders through a
probability distribution (often referred to as the
mean-field approximation), we develop a simple bidding
strategy which can be implemented without any
statistical knowledge of bids, valuations, and query
arrival processes. The key idea is to use stochastic
approximation techniques to automatically track
long-term averages.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rallapalli:2014:MVI,
author = "Swati Rallapalli and Qiang Ma and Han Hee Song and
Mario Baldi and S. Muthukrishnan and Lili Qiu",
title = "Modeling the value of information granularity in
targeted advertising",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "42--45",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Behavioral Targeting (BT) in the past few years has
seen a great upsurge in commercial as well as research
interest. To make advertising campaigns more effective,
advertisers look to target more relevant users.
Ad networks and other data collectors, such as
Cellular Service Providers (CSPs), hold a treasure
trove of user information that is extremely valuable to
advertisers. Moreover, these players may have
complementary sets of data. Combining and using data
from different collectors can be very useful for
advertising. However, in the trade of data among the
various players, it is currently unclear how a price
can be attached to a certain piece of information. This
work contributes (i) a MOdel of the Value of
INformation Granularity (MoVInG) that captures the
impact of additional information on the revenue from
targeted ads in case of uniform bidding and (ii) an
expression that is applicable in more general
scenarios. We apply MoVInG to a user data-set from a
large CSP to evaluate the financial benefit of precise
user data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiang:2014:SSD,
author = "Mung Chiang",
title = "{SDP}: {Smart Data Pricing}, from theorems to trials",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "46--46",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fifty years ago, the transition from voice calls to
bursty data traffic justified packet switched networks.
As data traffic becomes more heterogeneous today, what
are the implications to network architecture? Instead
of just counting bytes, Smart Data Pricing (SDP)
manages traffic by treating different bytes
differently. SDP can refer to (a) usage pricing like
\$10/GB, with throttling/booster, (b)
time/location/app/congestion-dependent dynamic pricing,
(c) two-sided 1-800 pricing, (d) WiFi
offloading/proactive caching, (e) quota-aware content
distribution, (f) transaction-based pricing, \ldots{},
or any of the above. It can help create happier users,
less congestion and better QoE, lower CapEx/OpEx,
higher revenue/profit margin, less churn, more
consumption, and more ad revenue. But it also requires
smart interfaces among pipe providers and content/app
providers, and a combination of fundamental research,
systems implementation, and user trials. This talk
summarizes what we learned about the win--win that
awaits the whole ecosystem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anselmi:2014:ECP,
author = "Jonatha Anselmi and Danilo Ardagna and John C. S. Lui
and Adam Wierman and Yunjian Xu and Zichao Yang",
title = "The economics of the cloud: price competition and
congestion",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "47--49",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627549",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berbeglia:2014:PMD,
author = "Gerardo Berbeglia and Peter Sloan and Adrian Vetta",
title = "Pricing mechanisms for a durable good monopolist",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "50--50",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A durable good is a long-lasting good that can be
consumed repeatedly over time. Theoretically less is
known about durable goods than their more well-studied
counterparts, perishable goods. However, on the
practical side, durable goods abound and are very
familiar to us. For example, many of the most important
consumer items are (at least to some extent) durable,
such as land, housing, cars, diamonds etc. A duropolist
is a monopolist in the market of a durable good.
Topically, duropolists include several well-known
purveyors of digital goods. In this talk, we examine
the strategic issues facing duropolists. Pricing a
durable good is not as simple as it may appear.
Specifically, whilst durable goods are more desirable
to the consumer, it is questionable whether a
duropolist has additional monopoly power beyond that of
an equivalent monopolist for a perishable good. Indeed,
quite the opposite may be true. In 1972, Richard Coase
made the startling conjecture that, in fact, a
duropolist has no monopoly power at all! The argument
being that a duropolist is not, in essence, a
monopolist: the firm does face stiff competition not
from other firms but, rather, from future incarnations
of itself. There have since been several proofs and
disproofs of the conjecture under assorted economic
models and time horizons. We discuss this, and also
real-world strategies that duropolists use to avoid the
conundrum highlighted by Coase. Our main results are to
quantify how well various price mechanisms perform.
Specifically, we give tight bounds, for the finite time
horizon case, on the relative profitabilities of these
mechanisms in terms of the number of time periods and
the total number of consumers. In doing so, we quantify
the extent to which a duropolist can generate higher
profits than an equivalent monopolist for a perishable
good.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ifrach:2014:PBS,
author = "Bar Ifrach and Rameh Johari",
title = "Pricing a bestseller: sales and visibility in the
marketplace for mobile apps",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "51--51",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The growth of mobile applications on smartphones and
tablets (``apps'') ranks as one of the most astonishing
technological developments in the recent past. Over 700,000
apps are available for immediate download from app
markets (e.g., App Store and Google Play). These
marketplaces are a significant disruptive change in the
way content is created and consumed. On the supply
side, they provide content creators direct,
instantaneous, and popular distribution systems where
they can implement their own marketing and pricing
policies, cutting out middlemen. Taking a combined
data-driven and structural analysis approach, this
paper focuses on the relationship between pricing
decisions and marketplace visibility. Our aim is to
empower content creators by offering strategic guidance
on how to leverage the marketplaces' flexibility.
Specifically, the market platforms feature
``top-ranked'' charts that list apps by number of
downloads. A high position in these charts is followed
by a remarkable boost in demand, according to industry
sources. We call the effect of top-rank position on
future sales an indirect effect. First, we postulate a
reduced form model to estimate the magnitude of this
indirect effect. Our results show that it is
statistically significant and substantial. Second, we
study app pricing decisions in a model that
incorporates our earlier findings. Surprisingly, we
find that accounting for the indirect effect may give
rise to optimal price cycles, where the seller
alternates between a high price to boost revenue and a
low one to enhance visibility. We find evidence
supporting this pricing behavior in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wagner:2014:DAL,
author = "Daniel T. Wagner and Andrew Rice and Alastair R.
Beresford",
title = "Device analyzer: largescale mobile data collection",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "53--56",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We collected usage information from 12,500 Android
devices in the wild over the course of nearly 2 years.
Our dataset contains 53 billion data points from 894
models of devices running 687 versions of Android.
Processing the collected data presents a number of
challenges ranging from scalability to consistency and
privacy considerations. We present our system
architecture for collection and analysis of this
highly-distributed dataset, discuss how our system can
reliably collect time-series data in the presence of
unreliable timing information, and discuss issues and
lessons learned that we believe apply to many other big
data collection projects.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feinberg:2014:OCU,
author = "Eugene Feinberg and Xiaoxuan Zhang",
title = "Optimizing cloud utilization via switching decisions",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "57--60",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies a control problem for optimal
switching on and off a cloud computing services modeled
by an M=M=1 queue with holding, running and switching
costs. The main result is that an average-optimal
policy either always runs the system or is an (M; N)-
policy defined by two thresholds M and N, such that the
system is switched on upon an arrival epoch when the
system size accumulates to N and it is switched off
upon a departure epoch when the system size decreases
to M. We compare the optimal (M; N)-policy with the
classical (0; N)-policy and show the non-optimality of
it.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yilmaz:2014:FDK,
author = "Onur Yilmaz and Mustafa U. Torun and Ali N. Akansu",
title = "A fast derivation of {Karhunen--Loeve} transform
kernel for first-order autoregressive discrete
process",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "61--64",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627555",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Karhunen-Lo{\`e}ve Transform (KLT), also called
principal component analysis (PCA) or factor analysis,
based signal processing methods have been successfully
used in applications spanning from eigenfiltering to
recommender systems. KLT is a signal-dependent
transform comprising three major steps, where each
has its own computational requirement. Namely,
statistical measurement of random data is performed to
populate its covariance matrix. Then, eigenvectors
(eigenmatrix) and eigenvalues are calculated for the
given covariance matrix. Last, the incoming random data
vector is mapped onto the eigenspace (subspace) by
using the calculated eigenmatrix. The recently
developed method by Torun and Akansu offers an
efficient derivation of the explicit eigenmatrix for
the covariance matrix of first-order autoregressive,
AR(1), discrete stochastic process. It is the second
step of the eigenanalysis implementation as summarized
in the paper. Its computational complexity is
investigated and compared with the currently used
techniques. It is shown that the new method
significantly outperforms the others, in particular,
for very large matrix sizes that are common in big data
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Madan:2014:ATA,
author = "Bharat B. Madan and Manoj Banik",
title = "Attack tolerant architecture for big data file
systems",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "65--69",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Data driven decisions derived from big data have
become critical in many application domains, fueling
the demand for collection, transportation, storage and
processing of massive volumes of data. Such
applications have made data a valuable resource that
needs to be provided appropriate security. High value
associated with big data sets has rendered big data
storage systems attractive targets for cyber attackers,
whose goal is to compromise the Confidentiality,
Integrity and Availability of data and information.
The common defense strategy for protecting cyber assets
has been to first take preventive measures and, if
these fail, to detect intrusions and finally to recover.
Unfortunately, attackers have developed tremendous
technical sophistication to defeat most defensive
mechanisms. An alternative strategy is to design
architectures which are intrinsically attack tolerant.
This paper describes a technique that involves
eliminating single points of security failure through
fragmentation, coding, dispersion and reassembly. It is
shown that this technique can be successfully applied
to routing, networked storage systems, and big data
file systems to make them attack tolerant.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Suthaharan:2014:BDC,
author = "Shan Suthaharan",
title = "Big data classification: problems and challenges in
network intrusion prediction with machine learning",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "70--73",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper focuses on the specific problem of Big Data
classification of network intrusion traffic. It
discusses the system challenges presented by the Big
Data problems associated with network intrusion
prediction. The prediction of a possible intrusion
attack in a network requires continuous collection of
traffic data and learning of their characteristics on
the fly. The continuous collection of traffic data by
the network leads to Big Data problems that are caused
by the volume, variety and velocity properties of Big
Data. The learning of the network characteristics
requires machine learning techniques that capture global
knowledge of the traffic patterns. The Big Data
properties will lead to significant system challenges
to implement machine learning frameworks. This paper
discusses the problems and challenges in handling Big
Data classification using geometric
representation-learning techniques and the modern Big
Data networking technologies. In particular this paper
discusses the issues related to combining supervised
learning techniques, representation-learning
techniques, machine lifelong learning techniques and
Big Data technologies (e.g. Hadoop, Hive and Cloud) for
solving network traffic classification problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharma:2014:MAC,
author = "Abhishek B. Sharma and Franjo Ivanci{\'c} and
Alexandru Niculescu-Mizil and Haifeng Chen and Guofei
Jiang",
title = "Modeling and analytics for cyber-physical systems in
the age of big data",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "74--77",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this position paper we argue that the availability
of ``big'' monitoring data on Cyber-Physical Systems
(CPS) is challenging the traditional CPS modeling
approaches by violating their fundamental assumptions.
However, big data also brings unique opportunities in
its wake by enabling new modeling and analytics
approaches as well as facilitating novel applications.
We highlight a few key challenges and opportunities,
and outline research directions for addressing them. To
provide a proper context, we also summarize CPS
modeling approaches, and discuss how modeling and
analytics for CPS differs from general purpose IT
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hu:2014:AIM,
author = "Jie Hu and Kun Meng and Xiaomin Chen and Chuang Lin
and Jiwei Huang",
title = "Analysis of influence maximization in large-scale
social networks",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "78--81",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627559",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Influence maximization is an important problem in
online social networks. With the scale of social
networks increasing, the requirements of solutions for
influence maximization are becoming more and more
strict. In this paper, we discuss two basic methods to
compute the influence in general social networks, and
then reveal that the computation of influence in
series-parallel graphs has linear time complexity.
Finally, we propose a novel method to solve influence
maximization and show that it performs well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Whitworth:2014:SPC,
author = "Jeff Whitworth and Shan Suthaharan",
title = "Security problems and challenges in a machine
learning-based hybrid big data processing network
systems",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "82--85",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The data source that produces data continuously in
high volume and high velocity with large varieties of
data types creates Big Data, and causes problems and
challenges to Machine Learning (ML) techniques that
help extract, analyze and visualize important
information. To overcome these problems and challenges,
we propose to make use of the hybrid networking model
that consists of multiple components such as Hadoop
distributed file system (HDFS), cloud storage system,
security module and ML unit. Processing of Big Data in
this networking environment with ML techniques requires
user interaction and additional storage; hence, some
artificial delay between the arrivals of data domains
through external storage can help HDFS process the Big
Data efficiently. To address this problem we
suggest using public cloud for data storage which will
induce meaningful time delay to the data while making
use of its storage capability. However, the use of
public cloud will lead to security vulnerability to the
data transmission and storage. Therefore, we need some
form of security algorithm that provides a flexible
key-based encryption technique that can provide
tradeoffs between time-delay, security strength and
storage risks. In this paper we propose a model for
using public cloud provider trust levels to select
encryption types for data storage for use within a Big
Data analytics network topology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Savas:2014:TBD,
author = "Onur Savas and Yalin Sagduyu and Julia Deng and Jason
Li",
title = "Tactical big data analytics: challenges, use cases,
and solutions",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "86--89",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627561",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We discuss tactical challenges of the Big Data
analytics regarding the underlying data, application
space, and computing environment, and present a
comprehensive solution framework motivated by the
relevant tactical use cases. First, we summarize the
unique characteristics of the Big Data problem in the
Department of Defense (DoD) context and underline the
main differences from the commercial Big Data problems.
Then, we introduce two use cases, (i) Big Data
analytics with multi-intelligence (multi-INT) sensor
data and (ii) man-machine crowdsourcing using MapReduce
framework. For these two use cases, we introduce Big
Data analytics and cloud computing solutions in a
coherent framework that supports tactical data,
application, and computing needs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2014:FOL,
author = "Yu Zhang and Daby Sow and Deepak Turaga and Mihaela
van der Schaar",
title = "A fast online learning algorithm for distributed
mining of {BigData}",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "90--93",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627562",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "BigData analytics require that distributed mining of
numerous data streams is performed in real-time. Unique
challenges associated with designing such distributed
mining systems are: online adaptation to incoming data
characteristics, online processing of large amounts of
heterogeneous data, limited data access and
communication capabilities between distributed
learners, etc. We propose a general framework for
distributed data mining and develop an efficient online
learning algorithm based on this. Our framework
consists of an ensemble learner and multiple local
learners, which can only access different parts of the
incoming data. By exploiting the correlations of the
learning models among local learners, our proposed
learning algorithms can optimize the prediction
accuracy while requiring significantly less information
exchange and computational complexity than existing
state-of-the-art learning solutions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Heintz:2014:BGT,
author = "Benjamin Heintz and Abhishek Chandra",
title = "Beyond graphs: toward scalable hypergraph analysis
systems",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "94--97",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627563",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Graph theory has provided a powerful modeling
foundation for problems in many domains, but we argue
that group interactions are better modeled by
hypergraphs. As we work toward scalable systems for
such hypergraph analysis, several major challenges and
opportunities arise; here we highlight a sample of
those challenges. We consider the need for efficient
representations of hypergraphs, and show that in some
cases it is possible to exploit the specific structure
of a hypergraph to reduce storage overhead. We also
explore several challenges in distributing computation
on hypergraphs, including the need for more general
partitioning approaches. Finally, we discuss several
other problems that arise as we move from graphs to
hypergraphs, including designing programming models,
using hypergraphs to model real-world groups, and the
need for a better understanding of the structural
characteristics of hypergraphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Al-Jaroodi:2014:DDB,
author = "Jameela Al-Jaroodi and Nader Mohamed and Abdulla Eid",
title = "Dual direction big data download and analysis",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "98--101",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627564",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The term Big Data was recently coined as the amount of
generated and stored digital data has grown so rapidly
that it has become very hard to store, manage and
analyze without coming up with new techniques that can
cope with such challenges. Finding innovative
approaches to support big data analysis has become a
priority as both the research community and the
industry are trying to make use of these huge amounts
of available data. In this paper we introduce a new
approach to enhance the overall big data analysis
performance. The approach calls for utilizing data set
replication, parallel download, and parallel processing
over multiple compute nodes. The main concept calls for
simultaneously parallelizing the download of the data
(in partitions) from multiple replicated sites to
multiple compute nodes that will also perform the
analysis in parallel. Then the results are given to the
client that requested the analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brock:2014:LAN,
author = "Jacob Brock and Hao Luo and Chen Ding",
title = "Locality analysis: a nonillion time window problem",
journal = j-SIGMETRICS,
volume = "41",
number = "4",
pages = "102--105",
month = mar,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2627534.2627565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed May 21 15:15:25 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The rise of social media and cloud computing, paired
with ever-growing storage capacity, is bringing big
data into the limelight, and rightly so. Data, it
seems, can be found everywhere; it is harvested from
our cars, our pockets, and soon even from our
eyeglasses. While researchers in machine learning are
developing new techniques to analyze vast quantities of
sometimes unstructured data, there is another,
not-so-new, form of big data analysis that has been
quietly laying the architectural foundations of
efficient data usage for decades. Every time a piece of
data goes through a processor, it must get there
through the memory hierarchy. Since retrieving the data
from the main memory takes hundreds of times longer
than accessing it from the cache, a robust theory of
data usage can lay the groundwork for all efficient
caching. Since everything touched by the CPU is first
touched by the cache, the cache traces produced by the
analysis of big data will invariably be bigger than
big. In this paper we first summarize the locality
problem and its history, and then we give a view of the
present state of the field as it adapts to the industry
standards of multicore CPUs and multithreaded programs
before exploring ideas for expanding the theory to
other big data domains.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2014:RSD,
author = "Zhaoxu Wang and Wenxiang Dong and Wenyi Zhang and Chee
Wei Tan",
title = "Rumor source detection with multiple observations:
fundamental limits and algorithms",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "1--13",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591993",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses the problem of a single rumor
source detection with multiple observations, from a
statistical point of view of a spreading over a
network, based on the susceptible-infectious model. For
tree networks, multiple sequential observations for one
single instance of rumor spreading cannot improve over
the initial snapshot observation. The situation
dramatically improves for multiple independent
observations. We propose a unified inference framework
based on the union rumor centrality, and provide
explicit detection performance for degree-regular tree
networks. Surprisingly, even with merely two
observations, the detection probability at least
doubles that of a single observation, and further
approaches one, i.e., reliable detection, with
increasing degree. This indicates that a richer
diversity enhances detectability. For general graphs, a
detection algorithm using a breadth-first search
strategy is also proposed and evaluated. Besides rumor
source detection, our results can be used in network
forensics to combat recurring epidemic-like information
spreading such as online anomaly and fraudulent email
spams.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Han:2014:COS,
author = "Jinyoung Han and Daejin Choi and Byung-Gon Chun and
Ted Kwon and Hyun-chul Kim and Yanghee Choi",
title = "Collecting, organizing, and sharing pins in {Pinterest}:
interest-driven or social-driven?",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "15--27",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591996",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Pinterest, a popular social curating service where
people collect, organize, and share content (pins in
Pinterest), has gained great attention in recent years.
Despite the increasing interest in Pinterest, little
research has paid attention to how people collect,
manage, and share pins in Pinterest. In this paper, to
shed insight on such issues, we study the following
questions. How do people collect and manage pins by
their tastes in Pinterest? What factors do mainly drive
people to share their pins in Pinterest? How do the
characteristics of users (e.g., gender, popularity,
country) or properties of pins (e.g., category, topic)
play roles in propagating pins in Pinterest? To answer
these questions, we have conducted a measurement study
on patterns of pin curating and sharing in Pinterest.
By keeping track of all the newly posted and shared
pins in each category (e.g., animal, kids, women's
fashion) from June 5 to July 18, 2013, we built 350 K
pin propagation trees for 3 M users. With the dataset,
we investigate: (1) how users collect and curate pins,
(2) how users share their pins and why, and (3) how
users are related by shared pins of interest. Our key
finding is that pin propagation in Pinterest is mostly
driven by pin's properties like its topic, not by
user's characteristics like her number of followers. We
further show that users in the same community in the
interest graph (i.e., representing the relations among
users) of Pinterest share pins (i) in the same category
with 94\% probability and (ii) of the same URL where
pins come from with 89\% probability. Finally, we
explore the implications of our findings for predicting
how pins are shared in Pinterest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2014:JCR,
author = "Jiaming Xu and Rui Wu and Kai Zhu and Bruce Hajek and
R. Srikant and Lei Ying",
title = "Jointly clustering rows and columns of binary
matrices: algorithms and trade-offs",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "29--41",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592005",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In standard clustering problems, data points are
represented by vectors, and by stacking them together,
one forms a data matrix with row or column cluster
structure. In this paper, we consider a class of binary
matrices, arising in many applications, which exhibit
both row and column cluster structure, and our goal is
to exactly recover the underlying row and column
clusters by observing only a small fraction of noisy
entries. We first derive a lower bound on the minimum
number of observations needed for exact cluster
recovery. Then, we study three algorithms with
different running time and compare the number of
observations needed by them for successful cluster
recovery. Our analytical results show smooth time-data
trade-offs: one can gradually reduce the computational
complexity when increasingly more observations are
available.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{May:2014:FFH,
author = "Avner May and Augustin Chaintreau and Nitish Korula
and Silvio Lattanzi",
title = "Filter \& follow: how social media foster content
curation",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "43--55",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592010",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The impact of blogs and microblogging on the
consumption of news is dramatic, as every day users
rely more on these sources to decide what content to
pay attention to. In this work, we empirically and
theoretically analyze the dynamics of bloggers serving
as intermediaries between the mass media and the
general public. Our first contribution is to precisely
describe the receiving and posting behaviors of today's
social media users. For the first time, we study
jointly the volume and popularity of URLs received and
shared by users. We show that social media platforms
exhibit a natural ``content curation'' process. Users
and bloggers in particular obey two filtering laws: (1)
a user who receives less content typically receives
more popular content, and (2) a blogger who is less
active typically posts disproportionately popular
items. Our observations are remarkably consistent
across 11 social media data sets. We find evidence of a
variety of posting strategies, which motivates our
second contribution: a theoretical understanding of the
consequences of strategic posting on the stability of
social media, and its ability to satisfy the interests
of a diverse audience. We introduce a
``blog-positioning game'' and show that it can lead to
``efficient'' equilibria, in which users generally
receive the content they are interested in.
Interestingly, this model predicts that if users are
overly ``picky'' when choosing who to follow, no pure
strategy equilibrium exists for the bloggers, and thus
the game never converges. However, a bit of leniency by
the readers in choosing which bloggers to follow is
enough to guarantee convergence.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ioannidis:2014:PTP,
author = "Stratis Ioannidis and Andrea Montanari and Udi
Weinsberg and Smriti Bhagat and Nadia Fawaz and Nina
Taft",
title = "Privacy tradeoffs in predictive analytics",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "57--69",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592011",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Online services routinely mine user data to predict
user preferences, make recommendations, and place
targeted ads. Recent research has demonstrated that
several private user attributes (such as political
affiliation, sexual orientation, and gender) can be
inferred from such data. Can a privacy-conscious user
benefit from personalization while simultaneously
protecting her private attributes? We study this
question in the context of a rating prediction service
based on matrix factorization. We construct a protocol
of interactions between the service and users that has
remarkable optimality properties: it is
privacy-preserving, in that no inference algorithm can
succeed in inferring a user's private attribute with a
probability better than random guessing; it has maximal
accuracy, in that no other privacy-preserving protocol
improves rating prediction; and, finally, it involves a
minimal disclosure, as the prediction accuracy strictly
decreases when the service reveals less information. We
extensively evaluate our protocol using several rating
datasets, demonstrating that it successfully blocks the
inference of gender, age and political affiliation,
while incurring less than 5\% decrease in the accuracy
of rating prediction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shi:2014:OAF,
author = "Weijie Shi and Linquan Zhang and Chuan Wu and Zongpeng
Li and Francis C. M. Lau",
title = "An online auction framework for dynamic resource
provisioning in cloud computing",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "71--83",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591980",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Auction mechanisms have recently attracted substantial
attention as an efficient approach to pricing and
resource allocation in cloud computing. This work, to
the authors' knowledge, represents the first online
combinatorial auction designed in the cloud computing
paradigm, which is general and expressive enough to
both (a) optimize system efficiency across the temporal
domain instead of at an isolated time point, and (b)
model dynamic provisioning of heterogeneous Virtual
Machine (VM) types in practice. The final result is an
online auction framework that is truthful,
computationally efficient, and guarantees a competitive
ratio $ e + \frac{1}{e - 1} \approx 3.30 $ in
social welfare in typical scenarios. The framework
consists of three main steps: (1) a tailored
primal-dual algorithm that decomposes the long-term
optimization into a series of independent one-shot
optimization problems, with an additive loss of
$ \frac{1}{e - 1} $ in competitive ratio, (2) a randomized
auction sub-framework that applies primal-dual
optimization for translating a centralized co-operative
social welfare approximation algorithm into an auction
mechanism, retaining a similar approximation ratio
while adding truthfulness, and (3) a primal-dual update
plus dual fitting algorithm for approximating the
one-shot optimization with a ratio $ \lambda $ close to
$e$. The efficacy of the online auction framework is
validated through theoretical analysis and trace-driven
simulation studies. We also hope that the
framework, as well as its three independent modules,
can be instructive in auction design for other related
problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nair:2014:EPS,
author = "Jayakrishnan Nair and Sachin Adlakha and Adam
Wierman",
title = "Energy procurement strategies in the presence of
intermittent sources",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "85--97",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591982",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increasing penetration of intermittent,
unpredictable renewable energy sources such as wind
energy poses significant challenges for utility
companies trying to incorporate renewable energy in
their portfolio. In this work, we study the problem of
conventional energy procurement in the presence of
intermittent renewable resources. We model the problem
as a variant of the newsvendor problem, in which the
presence of renewable resources induces supply side
uncertainty, and in which conventional energy may be
procured in three stages to balance supply and demand.
We compute closed-form expressions for the optimal
energy procurement strategy and study the impact of
increasing renewable penetration, and of proposed
changes to the structure of electricity markets. We
explicitly characterize the impact of a growing
renewable penetration on the procurement policy by
considering a scaling regime that models the
aggregation of unpredictable renewable sources. A key
insight from our results is that there is a separation
between the impact of the stochastic nature of this
aggregation, and the impact of market structure and
forecast accuracy. Additionally, we study the impact on
procurement of two proposed changes to the market
structure: the addition and the placement of an
intermediate market. We show that addition of an
intermediate market does not necessarily increase the
efficiency of utilization of renewable sources.
Further, we show that the optimal placement of the
intermediate market is insensitive to the level of
renewable penetration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2014:RAD,
author = "Linquan Zhang and Zongpeng Li and Chuan Wu",
title = "Randomized auction design for electricity markets
between grids and microgrids",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "99--110",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591999",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work studies electricity markets between power
grids and microgrids, an emerging paradigm of electric
power generation and supply. It is among the first that
addresses the economic challenges arising from such
grid integration, and represents the first power
auction mechanism design that explicitly handles the
Unit Commitment Problem (UCP), a key challenge in power
grid optimization previously investigated only for
centralized cooperative algorithms. The proposed
solution leverages a recent result in theoretical
computer science that can decompose an optimal
fractional (infeasible) solution to NP-hard problems
into a convex combination of integral (feasible)
solutions. The end result includes randomized power
auctions that are (approximately) truthful and
computationally efficient, and achieve small
approximation ratios for grid-wide social welfare under
UCP constraints and temporal demand correlations. Both
power markets with grid-to-microgrid and
microgrid-to-grid energy sales are studied, with an
auction designed for each, under the same randomized
power auction framework. Trace driven simulations are
conducted to verify the efficacy of the two proposed
inter-grid power auctions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2014:PDC,
author = "Zhenhua Liu and Iris Liu and Steven Low and Adam
Wierman",
title = "Pricing data center demand response",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "111--123",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592004",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Demand response is crucial for the incorporation of
renewable energy into the grid. In this paper, we focus
on a particularly promising industry for demand
response: data centers. We use simulations to show
that, not only are data centers large loads, but they
can provide as much (or possibly more) flexibility as
large-scale storage if given the proper incentives.
However, due to the market power most data centers
maintain, it is difficult to design programs that are
efficient for data center demand response. To that end,
we propose that prediction-based pricing is an
appealing market design, and show that it outperforms
more traditional supply function bidding mechanisms in
situations where market power is an issue. However,
prediction-based pricing may be inefficient when
predictions are inaccurate, and so we provide analytic,
worst-case bounds on the impact of prediction error on
the efficiency of prediction-based pricing. These
bounds hold even when network constraints are
considered, and highlight that prediction-based pricing
is surprisingly robust to prediction error.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Larranaga:2014:IPM,
author = "Maialen Larra{\~n}aga and Urtzi Ayesta and Ina Maria
Verloop",
title = "Index policies for a multi-class queue with convex
holding cost and abandonments",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "125--137",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591983",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate a resource allocation problem in a
multi-class server with convex holding costs and user
impatience under the average cost criterion. In
general, the optimal policy has a complex dependency on
all the input parameters and state information. Our
main contribution is to derive index policies that can
serve as heuristics and are shown to give good
performance. Our index policy attributes to each class
an index, which depends on the number of customers
currently present in that class. The index values are
obtained by solving a relaxed version of the optimal
stochastic control problem and combining results from
restless multi-armed bandits and queueing theory. They
can be expressed as a function of the steady-state
distribution probabilities of a one-dimensional
birth-and-death process. For linear holding cost, the
index can be calculated in closed-form and turns out to
be independent of the arrival rates and the number of
customers present. In the case of no abandonments and
linear holding cost, our index coincides with the $ c
\mu $-rule, which is known to be optimal in this simple
setting. For general convex holding cost we derive
properties of the index value in limiting regimes: we
consider the behavior of the index (i) as the number of
customers in a class grows large, which allows us to
derive the asymptotic structure of the index policies,
and (ii) as the abandonment rate vanishes, which allows
us to retrieve an index policy proposed for the
multi-class M/M/1 queue with convex holding cost and no
abandonments. In fact, in a multi-server environment it
follows from recent advances that the index policy is
asymptotically optimal for linear holding cost. To
obtain further insights into the index policy, we
consider the fluid version of the relaxed problem and
derive a closed-form expression for the fluid index.
The latter coincides with the stochastic model in case
of linear holding costs. For arbitrary convex holding
cost the fluid index can be seen as the $ G c \mu
\theta $-rule, that is, including abandonments into the
generalized $ c \mu $-rule ($ G c \mu $-rule).
Numerical experiments show that our index policies
become optimal as the load in the system increases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Walton:2014:CSS,
author = "Neil Stuart Walton",
title = "Concave switching in single and multihop networks",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "139--151",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591987",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Switched queueing networks model wireless networks,
input queued switches and numerous other networked
communications systems. For single-hop networks, we
consider a $ (\alpha, g)$-switch policy which combines
the MaxWeight policies with bandwidth sharing networks
--- a further well studied model of Internet
congestion. We prove the maximum stability property for
this class of randomized policies. Thus these policies
have the same first order behavior as the MaxWeight
policies. However, for multihop networks some of these
generalized policies address a number of critical
weaknesses of the MaxWeight/BackPressure policies. For
multihop networks with fixed routing, we consider the
Proportional Scheduler (or $ (1, \log)$-policy). In
this setting, the BackPressure policy is maximum
stable, but must maintain a queue for every
route-destination, which typically grows rapidly with a
network's size. However, this proportionally fair
policy only needs to maintain a queue for each outgoing
link, which is typically bounded in number. As is
common with Internet routing, by maintaining per-link
queueing each node only needs to know the next hop for
each packet and not its entire route. Further, in
contrast to BackPressure, the Proportional Scheduler
does not compare downstream queue lengths to determine
weights, only local link information is required. This
leads to greater potential for decomposed
implementations of the policy. Through a reduction
argument and an entropy argument, we demonstrate that,
whilst maintaining substantially less queueing
overhead, the Proportional Scheduler achieves maximum
throughput stability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huang:2014:POL,
author = "Longbo Huang and Xin Liu and Xiaohong Hao",
title = "The power of online learning in stochastic network
optimization",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "153--165",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591990",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we investigate the power of online
learning in stochastic network optimization with
unknown system statistics a priori. We are interested
in understanding how information and learning can be
efficiently incorporated into system control
techniques, and what are the fundamental benefits of
doing so. We propose two Online Learning-Aided Control
techniques, OLAC and OLAC2, that explicitly utilize the
past system information in current system control via a
learning procedure called dual learning. We prove
strong performance guarantees of the proposed
algorithms: OLAC and OLAC2 achieve the near-optimal $
[O(\epsilon), O([\log (1 / \epsilon)]^2)] $
utility-delay tradeoff and OLAC2 possesses an $
O(\epsilon^{-2 / 3}) $ convergence time. Simulation
results also confirm the superior performance of the
proposed algorithms in practice. To the best of our
knowledge, OLAC and OLAC2 are the first algorithms that
simultaneously possess explicit near-optimal delay
guarantee and sub-linear convergence time, and our
attempt is the first to explicitly incorporate online
learning into stochastic network optimization and to
demonstrate its power in both theory and practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jelenkovic:2014:SRC,
author = "Predrag R. Jelenkovic and Evangelia D. Skiani",
title = "Is sharing with retransmissions causing
instabilities?",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "167--179",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592001",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Retransmissions represent a primary failure recovery
mechanism on all layers of communication network
architecture. Similarly, fair sharing, e.g. processor
sharing (PS), is a widely accepted approach to resource
allocation among multiple users. Recent work has shown
that retransmissions in failure-prone, e.g. wireless ad
hoc, networks can cause heavy tails and long delays. In
this paper, we discover a new phenomenon showing that
PS-based scheduling induces complete instability in the
presence of retransmissions, regardless of how low the
traffic load may be. This phenomenon occurs even when
the job sizes are bounded\slash fragmented, e.g.,
deterministic. Our analytical results are further
validated via simulation experiments. Moreover, our
work demonstrates that scheduling one job at a time,
such as first-come-first-serve, achieves stability and
should be preferred in these systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2014:NWC,
author = "Jian Tan and Yandong Wang and Weikuan Yu and Li
Zhang",
title = "Non-work-conserving effects in {MapReduce}: diffusion
limit and criticality",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "181--192",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592007",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sequentially arriving jobs share a MapReduce cluster,
each desiring a fair allocation of computing resources
to serve its associated map and reduce tasks. The model
of such a system consists of a processor sharing queue
for the MapTasks and a multi-server queue for the
ReduceTasks. These two queues are dependent through a
constraint that the input data of each ReduceTask are
fetched from the intermediate data generated by the
MapTasks belonging to the same job. A more generalized
form of MapReduce queueing model can capture the
essence of other distributed data processing systems
that contain interdependent processor sharing queues
and multi-server queues. Through theoretical modeling
and extensive experiments, we show that this
dependence, if not carefully dealt with, can cause
non-work-conserving effects that negatively impact
system performance and scalability. First, we
characterize the heavy-traffic approximation. Depending
on how tasks are scheduled, the number of jobs in the
system can even exhibit jumps in diffusion limits,
resulting in prolonged job execution times. This
problem can be mitigated through carefully applying a
tie-breaking rule for ReduceTasks, which as a
theoretical finding has direct engineering
implications. Second, we empirically validate a
criticality phenomenon using experiments. MapReduce
systems experience an undesirable performance
degradation when they have reached certain critical
points, another finding that offers fundamental
guidance on managing MapReduce systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stoica:2014:CBD,
author = "Ion Stoica",
title = "Conquering big data with spark and {BDAS}",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "193--193",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2611389",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today, big and small organizations alike collect huge
amounts of data, and they do so with one goal in mind:
extract `value' through sophisticated exploratory
analysis, and use it as the basis to make decisions as
varied as personalized treatment and ad targeting.
Unfortunately, existing data analytics tools are slow
in answering queries, as they typically require sifting
through huge amounts of data stored on disk, and are
even less suitable for complex computations, such as
machine learning algorithms. These limitations leave
the potential of extracting value of big data
unfulfilled. To address this challenge, we are
developing Berkeley Data Analytics Stack (BDAS), an
open source data analytics stack that provides
interactive response times for complex computations on
massive data. To achieve this goal, BDAS supports
efficient, large-scale in-memory data processing, and
allows users and applications to trade between query
accuracy, time, and cost. In this talk, I'll present
the architecture, challenges, results, and our
experience with developing BDAS, with a focus on Apache
Spark, an in-memory cluster computing engine that
provides support for a variety of workloads, including
batch, streaming, and iterative computations. In a
relatively short time, Spark has become the most active
big data project in the open source community, and is
already being used by over one hundred companies and
research institutions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shamsi:2014:HSP,
author = "Zain Shamsi and Ankur Nandwani and Derek Leonard and
Dmitri Loguinov",
title = "{Hershel}: single-packet {OS} fingerprinting",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "195--206",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591972",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditional TCP/IP fingerprinting tools (e.g., nmap)
are poorly suited for Internet-wide use due to the
large amount of traffic and intrusive nature of the
probes. This can be overcome by approaches that rely on
a single SYN packet to elicit a vector of features from
the remote server; however, these methods face
difficult classification problems due to the high
volatility of the features and severely limited amounts
of information contained therein. Since these
techniques have not been studied before, we first
pioneer stochastic theory of single-packet OS
fingerprinting, build a database of 116 OSes, design a
classifier based on our models, evaluate its accuracy
in simulations, and then perform OS classification of
37.8M hosts from an Internet-wide scan.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shahzad:2014:NCH,
author = "Muhammad Shahzad and Alex X. Liu",
title = "Noise can help: accurate and efficient per-flow
latency measurement without packet probing and time
stamping",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "207--219",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591988",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the growth in number and significance of the
emerging applications that require extremely low
latencies, network operators are facing increasing need
to perform latency measurement on per-flow basis for
network monitoring and troubleshooting. In this paper,
we propose COLATE, the first per-flow latency
measurement scheme that requires no probe packets and
time stamping. Given a set of observation points,
COLATE records packet timing information at each point
so that later for any two points, it can accurately
estimate the average and standard deviation of the
latencies experienced by the packets of any flow in
passing the two points. The key idea is that when
recording packet timing information, COLATE purposely
allows noise to be introduced for minimizing storage
space, and when querying the latency of a target flow,
COLATE uses statistical techniques to denoise and
obtain an accurate latency estimate. COLATE is designed
to be efficiently implementable on network middleboxes.
In terms of processing overhead, COLATE performs only
one hash and one memory update per packet. In terms of
storage space, COLATE uses less than 0.1 bit per
packet, which means that, on a backbone link with about
half a million packets per second, using a 256GB drive,
COLATE can accumulate time stamps of packets traversing
the link for over 1.5 years. We evaluated COLATE using
three real traffic traces that include a backbone
traffic trace, an enterprise network traffic trace, and
a data center traffic trace. Results show that COLATE
always achieves the required reliability for any given
confidence interval.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Viennot:2014:MSG,
author = "Nicolas Viennot and Edward Garcia and Jason Nieh",
title = "A measurement study of {Google Play}",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "221--233",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592003",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although millions of users download and use
third-party Android applications from the Google Play
store, little information is known on an aggregated
level about these applications. We have built
PlayDrone, the first scalable Google Play store
crawler, and used it to index and analyze over
1,100,000 applications in the Google Play store on a
daily basis, the largest such index of Android
applications. PlayDrone leverages various hacking
techniques to circumvent Google's roadblocks for
indexing Google Play store content, and makes
proprietary application sources available, including
source code for over 880,000 free applications. We
demonstrate the usefulness of PlayDrone in decompiling
and analyzing application content by exploring four
previously unaddressed issues: the characterization of
Google Play application content at large scale and its
evolution over time, library usage in applications and
its impact on application portability, duplicative
application content in Google Play, and the
ineffectiveness of OAuth and related service
authentication mechanisms resulting in malicious users
being able to easily gain unauthorized access to user
data and resources on Amazon Web Services and
Facebook.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2014:ITC,
author = "Chung Hwan Kim and Junghwan Rhee and Hui Zhang and
Nipun Arora and Guofei Jiang and Xiangyu Zhang and
Dongyan Xu",
title = "{IntroPerf}: transparent context-sensitive multi-layer
performance inference using system stack traces",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "235--247",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592008",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance bugs are frequently observed in commodity
software. While profilers or source code-based tools
can be used at development stage where a program is
diagnosed in a well-defined environment, many
performance bugs survive such a stage and affect
production runs. OS kernel-level tracers are commonly
used in post-development diagnosis due to their
independence from programs and libraries; however, they
lack detailed program-specific metrics to reason about
performance problems such as function latencies and
program contexts. In this paper, we propose a novel
performance inference system, called IntroPerf, that
generates fine-grained performance information --- like
that from application profiling tools --- transparently
by leveraging OS tracers that are widely available in
most commodity operating systems. With system stack
traces as input, IntroPerf enables transparent
context-sensitive performance inference, and diagnoses
application performance in a multi-layered scope
ranging from user functions to the kernel. Evaluated
with various performance bugs in multiple open source
software projects, IntroPerf automatically ranks
potential internal and external root causes of
performance bugs with high accuracy without any prior
knowledge about or instrumentation on the subject
software. Our results show IntroPerf's effectiveness as
a lightweight performance introspection tool for
post-development diagnosis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Suneja:2014:NIB,
author = "Sahil Suneja and Canturk Isci and Vasanth Bala and
Eyal de Lara and Todd Mummert",
title = "Non-intrusive, out-of-band and out-of-the-box systems
monitoring in the cloud",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "249--261",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592009",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The dramatic proliferation of virtual machines (VMs)
in datacenters and the highly-dynamic and transient
nature of VM provisioning has revolutionized datacenter
operations. However, the management of these
environments is still carried out using re-purposed
versions of traditional agents, originally developed
for managing physical systems, or most recently via
newer virtualization-aware alternatives that require
guest cooperation and accessibility. We show that these
existing approaches are a poor match for monitoring and
managing (virtual) systems in the cloud due to their
dependence on guest cooperation and operational health,
and their growing lifecycle management overheads in the
cloud. In this work, we first present Near Field
Monitoring (NFM), our non-intrusive, out-of-band cloud
monitoring and analytics approach that is designed
based on cloud operation principles and to address the
limitations of existing techniques. NFM decouples
system execution from monitoring and analytics
functions by pushing monitoring out of the target
systems' scope. By leveraging and extending VM
introspection techniques, our framework provides
simple, standard interfaces to monitor running systems
in the cloud that require no guest cooperation or
modification, and have minimal effect on guest
execution. By decoupling monitoring and analytics from
target system context, NFM provides ``always-on''
monitoring, even when the target system is
unresponsive. NFM also works ``out-of-the-box'' for any
cloud instance as it eliminates any need for installing
and maintaining agents or hooks in the monitored
systems. We describe the end-to-end implementation of
our framework with two real-system prototypes based on
two virtualization platforms. We discuss the new cloud
analytics opportunities enabled by our decoupled
execution, monitoring and analytics architecture. We
present four applications that are built on top of our
framework and show their use for across-time and
across-system analytics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnasamy:2014:BEU,
author = "Subhashini Krishnasamy and Siddhartha Banerjee and
Sanjay Shakkottai",
title = "The behavior of epidemics under bounded
susceptibility",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "263--275",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591977",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate the sensitivity of epidemic behavior to
a bounded susceptibility constraint --- susceptible
nodes are infected by their neighbors via the regular
SI/SIS dynamics, but subject to a cap on the infection
rate. Such a constraint is motivated by modern social
networks, wherein messages are broadcast to all
neighbors, but attention spans are limited. Bounded
susceptibility also arises in distributed computing
applications with download bandwidth constraints, and
in human epidemics under quarantine policies. Network
epidemics have been extensively studied in literature;
prior work characterizes the graph structures required
to ensure fast spreading under the SI dynamics, and
long lifetime under the SIS dynamics. In particular,
these conditions turn out to be meaningful for two
classes of networks of practical relevance --- dense,
uniform (i.e., clique-like ) graphs, and sparse,
structured (i.e., star-like ) graphs. We show that
bounded susceptibility has a surprising impact on
epidemic behavior in these graph families. For the SI
dynamics, bounded susceptibility has no effect on
star-like networks, but dramatically alters the
spreading time in clique-like networks. In contrast,
for the SIS dynamics, clique-like networks are
unaffected, but star-like networks exhibit a sharp
change in extinction times under bounded
susceptibility. Our findings are useful for the design
of disease-resistant networks and infrastructure
networks. More generally, they show that results for
existing epidemic models are sensitive to modeling
assumptions in non-intuitive ways, and suggest caution
in directly using these as guidelines for real
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gabielkov:2014:SSN,
author = "Maksym Gabielkov and Ashwin Rao and Arnaud Legout",
title = "Studying social networks at scale: macroscopic anatomy
of the {Twitter} social graph",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "277--288",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591985",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Twitter is one of the largest social networks using
exclusively directed links among accounts. This makes
the Twitter social graph much closer to the social
graph supporting real life communications than, for
instance, Facebook. Therefore, understanding the
structure of the Twitter social graph is interesting
not only for computer scientists, but also for
researchers in other fields, such as sociologists.
However, little is known about how the information
propagation in Twitter is constrained by its inner
structure. In this paper, we present an in-depth study
of the macroscopic structure of the Twitter social
graph unveiling the highways on which tweets propagate,
the specific user activity associated with each
component of this macroscopic structure, and the
evolution of this macroscopic structure with time for
the past 6 years. For this study, we crawled Twitter to
retrieve all accounts and all social relationships
(follow links) among accounts; the crawl completed in
July 2012 with 505 million accounts interconnected by
23 billion links. Then, we present a methodology to
unveil the macroscopic structure of the Twitter social
graph. This macroscopic structure consists of 8
components defined by their connectivity
characteristics. Each component groups users with a
specific usage of Twitter. For instance, we identified
components gathering together spammers, or celebrities.
Finally, we present a method to approximate the
macroscopic structure of the Twitter social graph in
the past, validate this method using old datasets, and
discuss the evolution of the macroscopic structure of
the Twitter social graph during the past 6 years.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buccapatnam:2014:SBS,
author = "Swapna Buccapatnam and Atilla Eryilmaz and Ness B.
Shroff",
title = "Stochastic bandits with side observations on
networks",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "289--300",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591989",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the stochastic multi-armed bandit (MAB)
problem in the presence of side-observations across
actions. In our model, choosing an action provides
additional side observations for a subset of the
remaining actions. One example of this model occurs in
the problem of targeting users in online social
networks where users respond to their friends'
activity, thus providing information about each other's
preferences. Our contributions are as follows: (1) We
derive an asymptotic (with respect to time) lower bound
(as a function of the network structure) on the regret
(loss) of any uniformly good policy that achieves the
maximum long term average reward. (2) We propose two
policies --- a randomized policy and a policy based on
the well-known upper confidence bound (UCB) policies,
both of which explore each action at a rate that is a
function of its network position. We show that these
policies achieve the asymptotic lower bound on the
regret up to a multiplicative factor independent of
network structure. The upper bound guarantees on the
regret of these policies are better than those of
existing policies. Finally, we use numerical examples
on a real-world social network to demonstrate the
significant benefits obtained by our policies against
other existing policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ok:2014:MDS,
author = "Jungseul Ok and Youngmi Jin and Jinwoo Shin and Yung
Yi",
title = "On maximizing diffusion speed in social networks:
impact of random seeding and clustering",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "301--313",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591991",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A variety of models have been proposed and analyzed to
understand how a new innovation (e.g., a technology, a
product, or even a behavior) diffuses over a social
network, broadly classified into either of
epidemic-based or game-based ones. In this paper, we
consider a game-based model, where each individual
makes a selfish, rational choice in terms of its payoff
in adopting the new innovation, but with some noise. We
study how the diffusion effect can be maximized by seeding
a subset of individuals (within a given budget), i.e.,
convincing them to pre-adopt a new innovation. In
particular, we aim at finding `good' seeds for
minimizing the time to infect all others, i.e.,
diffusion speed maximization. To this end, we design
polynomial-time approximation algorithms for three
representative classes, Erd{\H{o}}s--R{\'e}nyi,
planted partition and geometrically structured graph
models, which correspond to globally well-connected,
locally well-connected with large clusters, and locally
well-connected with small clusters, respectively, and
provide their performance guarantees in terms of
approximation and complexity. First, for the dense
Erd{\H{o}}s--R{\'e}nyi and planted partition graphs, we
show that an arbitrary seeding and a simple seeding
proportional to the size of clusters are almost optimal
with high probability. Second, for geometrically
structured sparse graphs, including planar and
$d$-dimensional graphs, our algorithm that (a) constructs
clusters, (b) seeds the border individuals among
clusters, and (c) greedily seeds inside each cluster
always outputs an almost optimal solution. We validate
our theoretical findings with extensive simulations
on a real social graph. We believe that our results
provide new practical insights on how to seed over a
social network depending on its connection structure,
where individuals rationally adopt a new innovation. To
the best of our knowledge, we are the first to study such
diffusion speed maximization for game-based
diffusion, whereas extensive research efforts have
been made in epidemic-based models, often referred to
as influence maximization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yallouz:2014:TSS,
author = "Jose Yallouz and Ori Rottenstreich and Ariel Orda",
title = "Tunable survivable spanning trees",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "315--327",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591997",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Coping with network failures has become a major
networking challenge. The concept of tunable
survivability provides a quantitative measure for
specifying any desired level (0\%-100\%) of
survivability, thus offering flexibility in the routing
choice. Previous works focused on implementing this
concept on unicast transmissions. However, vital
network information is often broadcasted via spanning
trees. Accordingly, in this study, we investigate the
application of tunable survivability for efficient
maintenance of spanning trees in the presence of
failures. We establish efficient algorithmic schemes
for optimizing the level of survivability under various
QoS requirements. In addition, we derive theoretical
bounds on the number of required trees for maximum
survivability. Finally, through extensive simulations,
we demonstrate the effectiveness of the tunable
survivability concept in the construction of spanning
trees. Most notably, we show that, typically,
negligible reduction in the level of survivability
results in major improvement in the QoS performance of
the resulting spanning trees.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghit:2014:BRA,
author = "Bogdan Ghit and Nezih Yigitbasi and Alexandru Iosup
and Dick Epema",
title = "Balanced resource allocations across multiple dynamic
{MapReduce} clusters",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "329--341",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591998",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Running multiple instances of the MapReduce framework
concurrently in a multicluster system or datacenter
enables data, failure, and version isolation, which is
attractive for many organizations. It may also provide
some form of performance isolation, but in order to
achieve this in the face of time-varying workloads
submitted to the MapReduce instances, a mechanism for
dynamic resource (re-)allocations to those instances is
required. In this paper, we present such a mechanism
called Fawkes that attempts to balance the allocations
to MapReduce instances so that they experience similar
service levels. Fawkes proposes a new abstraction for
deploying MapReduce instances on physical resources,
the MR-cluster, which represents a set of resources
that can grow and shrink, and that has a core on which
MapReduce is installed with the usual data locality
assumptions but that relaxes those assumptions for
nodes outside the core. Fawkes dynamically grows and
shrinks the active MR-clusters based on a family of
weighting policies with weights derived from monitoring
their operation. We empirically evaluate Fawkes on a
multicluster system and show that it can deliver good
performance and balanced resource allocations, even
when the workloads of the MR-clusters are very uneven
and bursty, with workloads composed from both synthetic
and real-world benchmarks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berger:2014:RAQ,
author = "Daniel S. Berger and Martin Karsten and Jens Schmitt",
title = "On the relevance of adversarial queueing theory in
practice",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "343--354",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592006",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Adversarial Queueing Theory (AQT) has shown that
seemingly innocent traffic injection rates might lead
to unbounded queues in packet-switched networks ---
depending on scheduling strategies as well as
topological characteristics. Little attention has been
given to quantifying these effects in realistic network
configurations. In particular, the existing AQT
literature makes two unrealistic assumptions: infinite
buffers and perfect synchrony. Because finite buffers
inherently limit queue sizes, adversarial effects
ultimately lead to packet loss which we address in this
work. In addition, we study the effect of imperfect
network synchronization under the packet loss metric.
Our results, using analysis and simulation, indicate
that classical AQT examples appear harmless under
realistic assumptions but for a novel class of
adversaries considerably higher loss can be observed.
We introduce this class by giving examples of two new
AQT concepts to construct loss-efficient network
adversaries. Our analysis proves the robustness of
these new adversaries against randomized
de-synchronization effects in terms of variable link
delays and nodal processing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nachiappan:2014:GFE,
author = "Nachiappan Chidambaram Nachiappan and Praveen
Yedlapalli and Niranjan Soundararajan and Mahmut Taylan
Kandemir and Anand Sivasubramaniam and Chita R. Das",
title = "{GemDroid}: a framework to evaluate mobile platforms",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "355--366",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591973",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As the demand for feature-rich mobile systems such as
smartphones and tablets has outpaced other computing
systems and is expected to continue at a faster rate,
it is projected that SoCs with tens of cores and
hundreds of IPs (or accelerators) will be designed to
provide an unprecedented level of features and
functionality in the future. Design of such mobile systems
with required QoS and power budgets along with other
design constraints will be a daunting task for computer
architects, since any ad hoc, piecemeal solution is
unlikely to result in an optimal design. This requires
early exploration of the complete design space to
understand the system-level design trade-offs. To the
best of our knowledge, there is no such publicly
available tool to conduct a holistic evaluation of
mobile platforms consisting of cores, IPs and system
software. This paper presents GemDroid, a comprehensive
simulation infrastructure to address these concerns.
GemDroid has been designed by integrating the Android
open-source emulator for facilitating execution of
mobile applications, the GEM5 core simulator for
analyzing the CPU and memory centric designs, and
models for several IPs to collectively study their
impact on system-level performance and power. Analyzing
a spectrum of applications with GemDroid, we observed
that the memory subsystem is a vital cog in the mobile
platform because it needs to handle both core and IP
traffic, which have very different characteristics.
Consequently, we present a heterogeneous memory
controller (HMC) design, where we divide the memory
physically into two address regions, where the first
region with one memory controller (MC) handles
core-specific application data and the second region
with another MC handles all IP related data. The
proposed modifications to the memory controller design
result in an average 25\% reduction in execution time
for CPU bound applications, up to 11\% reduction in
frame drops, and on average 17\% reduction in CPU busy
time for on-screen (IP bound) applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shafiq:2014:UIN,
author = "Muhammad Zubair Shafiq and Jeffrey Erman and Lusheng
Ji and Alex X. Liu and Jeffrey Pang and Jia Wang",
title = "Understanding the impact of network dynamics on mobile
video user engagement",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "367--379",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591975",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mobile network operators have a significant interest
in the performance of streaming video on their networks
because network dynamics directly influence the Quality
of Experience (QoE). However, unlike video service
providers, network operators are not privy to the
client- or server-side logs typically used to measure
key video performance metrics, such as user engagement.
To address this limitation, this paper presents the
first large-scale study characterizing the impact of
cellular network performance on mobile video user
engagement from the perspective of a network operator.
Our study on a month-long anonymized data set from a
major cellular network makes two main contributions.
First, we quantify the effect that 31 different network
factors have on user behavior in mobile video. Our
results provide network operators direct guidance on
how to improve user engagement --- for example,
improving mean signal-to-interference ratio by 1 dB
reduces the likelihood of video abandonment by 2\%.
Second, we model the complex relationships between
these factors and video abandonment, enabling operators
to monitor mobile video user engagement in real-time.
Our model can predict whether a user completely
downloads a video with more than 87\% accuracy by
observing only the initial 10 seconds of video
streaming sessions. Moreover, our model achieves
significantly better accuracy than prior models that
require client- or server-side logs, yet we only use
standard radio network statistics and/or TCP/IP headers
available to network operators.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huang:2014:EEC,
author = "Jiwei Huang and Sen Yang and Ashwin Lall and Justin
Romberg and Jun Xu and Chuang Lin",
title = "Error estimating codes for insertion and deletion
channels",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "381--393",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591976",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Error estimating codes (EEC) have recently been
proposed for measuring the bit error rate (BER) in
packets transmitted over wireless links. They however
can provide such measurements only when there are no
insertion and deletion errors, which could occur in
various wireless network environments. In this work, we
propose ``idEEC'', the first technique that can do so
even in the presence of insertion and deletion errors.
We show that idEEC is provably robust under most bit
insertion and deletion scenarios, provided
insertion/deletion errors occur with much lower
probability than bit flipping errors. Our idEEC design
can build upon any existing EEC scheme. The basic idea
of the idEEC encoding is to divide the packet into a
number of segments, each of which is encoded using the
underlying EEC scheme. The basic idea of the idEEC
decoding is to divide the packet into a few slices in a
randomized manner --- each of which may contain several
segments --- and then try to identify a slice that has
no insertion and deletion errors in it (called a
``clean slice''). Once such a clean slice is found, it
is removed from the packet for later processing, and
this ``randomized divide and search'' procedure will be
iteratively performed on the rest of the packet until
no more clean slices can be found. The BER will then be
estimated from all the clean slices discovered through
all the iterations. A careful analysis of the accuracy
guarantees of the idEEC decoding is provided, and the
efficacy of idEEC is further validated by simulation
experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meyfroyt:2014:DDP,
author = "Thomas M. M. Meyfroyt and Sem C. Borst and Onno J.
Boxma and Dee Denteneer",
title = "Data dissemination performance in large-scale sensor
networks",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "395--406",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591981",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As the use of wireless sensor networks increases, the
need for (energy-)efficient and reliable broadcasting
algorithms grows. Ideally, a broadcasting algorithm
should have the ability to quickly disseminate data,
while keeping the number of transmissions low. In this
paper we develop a model describing the message count
in large-scale wireless sensor networks. We focus our
attention on the popular Trickle algorithm, which has
been proposed as a suitable communication protocol for
code maintenance and propagation in wireless sensor
networks. Besides providing a mathematical analysis of
the algorithm, we propose a generalized version of
Trickle, with an additional parameter defining the
length of a listen-only period. This generalization
proves to be useful for optimizing the design and usage
of the algorithm. For single-cell networks we show how
the message count increases with the size of the
network and how this depends on the Trickle parameters.
Furthermore, we derive distributions of
inter-broadcasting times and investigate their
asymptotic behavior. Our results prove conjectures made
in the literature concerning the effect of a
listen-only period. Additionally, we develop an
approximation for the expected number of transmissions
in multi-cell networks. All results are validated by
simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gorlatova:2014:MSK,
author = "Maria Gorlatova and John Sarik and Guy Grebla and Mina
Cong and Ioannis Kymissis and Gil Zussman",
title = "Movers and shakers: kinetic energy harvesting for the
{Internet of Things}",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "407--419",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591986",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Numerous energy harvesting wireless devices that will
serve as building blocks for the Internet of Things
(IoT) are currently under development. However, there
is still only limited understanding of the properties
of various energy sources and their impact on energy
harvesting adaptive algorithms. Hence, we focus on
characterizing the kinetic (motion) energy that can be
harvested by a wireless node with an IoT form factor
and on developing energy allocation algorithms for such
nodes. In this paper, we describe methods for
estimating harvested energy from acceleration traces.
To characterize the energy availability associated with
specific human activities (e.g., relaxing, walking,
cycling), we analyze a motion dataset with over 40
participants. Based on acceleration measurements that
we collected for over 200 hours, we study energy
generation processes associated with day-long human
routines. We also briefly summarize our experiments
with moving objects. We develop energy allocation
algorithms that take into account practical IoT node
design considerations, and evaluate the algorithms
using the collected measurements. Our observations
provide insights into the design of motion energy
harvesters, IoT nodes, and energy harvesting adaptive
algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lai:2014:PLT,
author = "Chengdi Lai and Steven H. Low and Ka-Cheong Leung and
Victor O. K. Li",
title = "Pricing link by time",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "421--433",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591974",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The combination of loss-based TCP and drop-tail
routers often results in full buffers, creating large
queueing delays. The challenge with parameter tuning
and the drastic consequence of improper tuning have
discouraged network administrators from enabling AQM
even when routers support it. To address this problem,
we propose a novel design principle for AQM, called the
pricing-link-by-time (PLT) principle. PLT increases the
link price as the backlog stays above a threshold $
\beta $, and resets the price once the backlog goes
below $ \beta $. We prove that such a system exhibits
cyclic behavior that is robust against changes in
network environment and protocol parameters. While $
\beta $ approximately controls the level of backlog,
the backlog dynamics are invariant for $ \beta $ across
a wide range of values. Therefore, $ \beta $ can be
chosen to reduce delay without undermining system
performance. We validate these analytical results using
packet-level simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moharir:2014:SCU,
author = "Sharayu Moharir and Javad Ghaderi and Sujay Sanghavi
and Sanjay Shakkottai",
title = "Serving content with unknown demand: the
high-dimensional regime",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "435--447",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591978",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we look at content placement in the
high-dimensional regime: there are n servers, and $
O(n) $ distinct types of content. Each server can store
and serve $ O(1) $ types at any given time. Demands for
these content types arrive, and have to be served in an
online fashion; over time, there are a total of $ O(n)
$ of these demands. We consider the algorithmic task of
content placement: determining which types of content
should be on which server at any given time, in the
setting where the demand statistics (i.e. the relative
popularity of each type of content) are not known
a priori, but have to be inferred from the very demands
we are trying to satisfy. This is the high-dimensional
regime because this scaling (everything being $ O(n)$)
prevents consistent estimation of demand statistics; it
models many modern settings where large numbers of
users, servers and videos/webpages interact in this
way. We characterize the performance of any scheme that
separates learning and placement (i.e. which uses a
portion of the demands to gain some estimate of the
demand statistics, and then uses the same for the
remaining demands), showing it is order-wise strictly
suboptimal. We then study a simple adaptive scheme ---
which myopically attempts to store the most recently
requested content on idle servers --- and show it
outperforms schemes that separate learning and
placement. Our results also generalize to the setting
where the demand statistics change with time. Overall,
our results demonstrate that separating the estimation
of demand, and the subsequent use of the same, is
strictly suboptimal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tune:2014:NDS,
author = "Paul Tune and Matthew Roughan",
title = "Network-design sensitivity analysis",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "449--461",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591979",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic matrices are used in many network engineering
tasks, for instance optimal network design.
Unfortunately, measurements of these matrices are
error-prone, a problem that is exacerbated when they
are extrapolated to provide the predictions used in
planning. Practical network design and management
should consider sensitivity to such errors, but
although robust optimisation techniques exist, it seems
they are rarely used, at least in part because of the
difficulty in generating an ensemble of admissible
traffic matrices with a controllable error level. We
address this problem in our paper by presenting a fast
and flexible technique of generating synthetic traffic
matrices. We demonstrate the utility of the method by
presenting a methodology for robust network design
based on adaptation of the mean-risk analysis concept
from finance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ai:2014:MSS,
author = "Lingqing Ai and Xian Wu and Lingxiao Huang and Longbo
Huang and Pingzhong Tang and Jian Li",
title = "The multi-shop ski rental problem",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "463--475",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591984",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the multi-shop ski rental problem. This
problem generalizes the classic ski rental problem to a
multi-shop setting, in which each shop has different
prices for renting and purchasing a pair of skis, and a
consumer has to make decisions on when and where to
buy. We are interested in the optimal online
(competitive-ratio minimizing) mixed strategy from the
consumer's perspective. For our problem in its basic
form, we obtain exciting closed-form solutions and a
linear time algorithm for computing them. We further
demonstrate the generality of our approach by
investigating three extensions of our basic problem,
namely ones that consider costs incurred by entering a
shop or switching to another shop. Our solutions to
these problems suggest that the consumer must assign
positive probability in exactly one shop at any buying
time. Our results apply to many real-world
applications, ranging from cost management in IaaS
cloud to scheduling in distributed computing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ding:2014:CCC,
author = "Wei Ding and Mahmut Kandemir",
title = "{CApRI}: {CAche}-conscious data reordering for
irregular codes",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "477--489",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591992",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Caches play a critical role in today's computer
systems and optimizing their performance has been a
critical objective in the last couple of decades.
Unfortunately, compared to a plethora of work in
software and hardware directed code/data optimizations,
much less effort has been spent in understanding the
fundamental characteristics of data access patterns
exhibited by application programs and their interaction
with the underlying cache hardware. Therefore, in
general it is hard to reason about cache behavior of a
program running on a target system. Motivated by this
observation, we first set up a `locality model' that
can help us determine the theoretical bounds of the
cache misses caused by irregular data accesses. We then
explain how this locality model can be used for
different data locality optimization purposes. After
that, based on our model, we propose a data reordering
(data layout reorganization) scheme that can be applied
after any existing data reordering schemes for
irregular applications to improve cache performance by
further reducing the cache misses. We evaluate the
effectiveness of our scheme using a set of 8 programs
with irregular data accesses, and show that it brings
significant improvements over the state-of-the-art on
two commercial multicore machines.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cai:2014:NCA,
author = "Yu Cai and Gulay Yalcin and Onur Mutlu and Erich F.
Haratsch and Osman Unsal and Adrian Cristal and Ken
Mai",
title = "Neighbor-cell assisted error correction for {MLC NAND}
flash memories",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "491--504",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591994",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Continued scaling of NAND flash memory to smaller
process technology nodes decreases its reliability,
necessitating more sophisticated mechanisms to
correctly read stored data values. To distinguish
between different potential stored values, conventional
techniques to read data from flash memory employ a
single set of reference voltage values, which are
determined based on the overall threshold voltage
distribution of flash cells. Unfortunately, the
phenomenon of program interference, in which a cell's
threshold voltage unintentionally changes when a
neighboring cell is programmed, makes this conventional
approach increasingly inaccurate in determining the
values of cells. This paper makes the new empirical
observation that identifying the value stored in the
immediate-neighbor cell makes it easier to determine
the data value stored in the cell that is being read.
We provide a detailed statistical and experimental
characterization of threshold voltage distribution of
flash memory cells conditional upon the
immediate-neighbor cell values, and show that such
conditional distributions can be used to determine a
set of read reference voltages that lead to error rates
much lower than when a single set of reference voltage
values based on the overall distribution are used.
Based on our analyses, we propose a new method for
correcting errors in a flash memory page, neighbor-cell
assisted correction (NAC). The key idea is to re-read a
flash memory page that fails error correction codes
(ECC) with the set of read reference voltage values
corresponding to the conditional threshold voltage
distribution assuming a neighbor cell value and use the
re-read values to correct the cells that have neighbors
with that value. Our simulations show that NAC
effectively improves flash memory lifetime by 33\%
while having no (at nominal lifetime) or very modest
(less than 5\% at extended lifetime) performance
overhead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gulur:2014:AAM,
author = "Nagendra Gulur and Mahesh Mehendale and Raman
Manikantan and Ramaswamy Govindarajan",
title = "{ANATOMY}: an analytical model of memory system
performance",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "505--517",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2591995",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Memory system design is increasingly influencing
modern multi-core architectures from both performance
and power perspectives. However, predicting the
performance of memory systems is complex, compounded by
the myriad design choices and parameters along multiple
dimensions, namely (i) technology, (ii) design and
(iii) architectural choices. In this work, we construct
an analytical model of the memory system to comprehend
this diverse space and to study the impact of memory
system parameters from latency and bandwidth
perspectives. Our model, called ANATOMY, consists of
two key components that are coupled with each other, to
model the memory system accurately. The first component
is a queuing model of memory which models in detail
various design choices and captures the impact of
technological choices in memory systems. The second
component is an analytical model to summarize key
workload characteristics, namely row buffer hit rate
(RBH), bank-level parallelism (BLP), and request spread
(S) which are used as inputs to the queuing model to
estimate memory performance. We validate the model
across a wide variety of memory configurations on 4, 8
and 16 cores using a total of 44 workloads. ANATOMY is
able to predict memory latency with an average error of
8.1\%, 4.1\% and 9.7\% over 4, 8 and 16 core
configurations. We demonstrate the extensibility and
applicability of our model by exploring a variety of
memory design choices such as the impact of clock
speed, benefit of multiple memory controllers, the role
of banks and channel width, and so on. We also
demonstrate ANATOMY's ability to capture architectural
elements such as scheduling mechanisms (using FR\_FCFS
and PAR\_BS) and the impact of DRAM refresh cycles. In all
of these studies, ANATOMY provides insight into sources
of memory performance bottlenecks and is able to
quantitatively predict the benefit of redressing
them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Khan:2014:EEM,
author = "Samira Khan and Donghyuk Lee and Yoongu Kim and Alaa
R. Alameldeen and Chris Wilkerson and Onur Mutlu",
title = "The efficacy of error mitigation techniques for {DRAM}
retention failures: a comparative experimental study",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "519--532",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592000",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As DRAM cells continue to shrink, they become more
susceptible to retention failures. DRAM cells that
permanently exhibit short retention times are fairly
easy to identify and repair through the use of memory
tests and row and column redundancy. However, the
retention time of many cells may vary over time due to
a property called Variable Retention Time (VRT). Since
these cells intermittently transition between failing
and non-failing states, they are particularly difficult
to identify through memory tests alone. In addition,
the high temperature packaging process may aggravate
this problem as the susceptibility of cells to VRT
increases after the assembly of DRAM chips. A promising
alternative to manufacture-time testing is to detect
and mitigate retention failures after the system has
become operational. Such a system would require
mechanisms to detect and mitigate retention failures in
the field, but would be responsive to retention
failures introduced after system assembly and could
dramatically reduce the cost of testing, enabling much
longer tests than are practical with manufacturer
testing equipment. In this paper, we analyze the
efficacy of three common error mitigation techniques
(memory tests, guardbands, and error correcting codes
(ECC)) in real DRAM chips exhibiting both intermittent
and permanent retention failures. Our analysis allows
us to quantify the efficacy of recent system-level
error mitigation mechanisms that build upon these
techniques. We revisit prior works in the context of
the experimental data we present, showing that our
measured results significantly impact these works'
conclusions. We find that mitigation techniques that
rely on run-time testing alone [38, 27, 50, 26] are
unable to ensure reliable operation even after many
months of testing. Techniques that incorporate ECC [4,
52], however, can ensure reliable DRAM operation after
only a few hours of testing. For example, VS-ECC [4],
which couples testing with variable strength codes to
allocate the strongest codes to the most error-prone
memory regions, can ensure reliable operation for 10
years after only 19 minutes of testing. We conclude
that the viability of these mitigation techniques
depends on efficient online profiling of DRAM performed
without disrupting system operation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2014:GDM,
author = "Kaibo Wang and Xiaoning Ding and Rubao Lee and Shinpei
Kato and Xiaodong Zhang",
title = "{GDM}: device memory management for {GPGPU}
computing",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "533--545",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592002",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "GPGPUs are evolving from dedicated accelerators
towards mainstream commodity computing resources.
During the transition, the lack of system management of
device memory space on GPGPUs has become a major
hurdle. In existing GPGPU systems, device memory space
is still managed explicitly by individual applications,
which not only increases the burden of programmers but
can also cause application crashes, hangs, or low
performance. In this paper, we present the design and
implementation of GDM, a fully functional GPGPU device
memory manager to address the above problems and
unleash the computing power of GPGPUs in
general-purpose environments. To effectively coordinate
the device memory usage of different applications, GDM
takes control over device memory allocations and data
transfers to and from device memory, leveraging a
buffer allocated in each application's virtual memory.
GDM utilizes the unique features of GPGPU systems and
relies on several effective optimization techniques to
guarantee the efficient usage of device memory space
and to achieve high performance. We have evaluated GDM
and compared it against state-of-the-art GPGPU system
software on a range of workloads. The results show that
GDM can prevent application crashes, including
those induced by device memory leaks, and improve
system performance by up to 43\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Diegues:2014:EPC,
author = "Nuno Diegues and Paolo Romano and Lu{\'\i}s
Rodrigues",
title = "On the energy and performance of commodity hardware
transactional memory",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "547--548",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592030",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The advent of multi-core architectures has brought
concurrent programming to the forefront of software
development. In this context, Transactional Memory (TM)
has gained increasing popularity as a simpler,
attractive alternative to traditional lock-based
synchronization. The recent integration of Hardware TM
(HTM) in the last generation of Intel commodity
processors turned TM into a mainstream technology,
raising a number of questions on its future and that of
concurrent programming. To evaluate the potential
impact of Intel's HTM, we conducted the largest study
on TM to date, comparing different locking techniques,
hardware and software TMs, as well as different
combinations of these mechanisms, from the dual
perspective of performance and power consumption. As a
result we perform a workload characterization, to help
programmers better exploit the currently available TM
facilities, and identify important research
directions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2014:ICM,
author = "Qi Wang and Liang Liu and Jinbei Zhang and Xinyu Wang
and Xinbing Wang and Songwu Lu",
title = "Impact of correlated mobility and cluster scalability
on connectivity of wireless networks",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "549--550",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592012",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose the correlated mobile k-hop clustered
networks model to implement correlated node movements
and scalable clusters. We divide network states into
three categories, i.e., cluster-sparse state,
cluster-dense state and cluster-inferior dense state,
and achieve the critical transmission range for the
last two states. Furthermore, we find that correlated
mobility and cluster scalability are closely related
to each other, and that the impact of these two properties
on connectivity is exerted mainly through influencing network
state transitions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tavakkol:2014:UPD,
author = "Arash Tavakkol and Mohammad Arjomand and Hamid
Sarbazi-Azad",
title = "Unleashing the potentials of dynamism for page
allocation strategies in {SSDs}",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "551--552",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592013",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In Solid-State Drives (SSDs) with tens of flash chips
and highly parallel architecture, we can speed up I/O
operations by well-utilizing resources during page
allocation. Proposals already exist for static
page allocation, which does not balance the I/O load and
whose efficiency depends on access address patterns. To
the best of our knowledge, there has been no research thus
far to show what happens if one or more internal
resources can be freely allocated regardless of the
request address. This paper explores the possibility of
using different degrees of dynamism in page allocation
and identifies key design opportunities that they
present to improve SSD's characteristics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mandayam:2014:TCM,
author = "Chinmoy V. Mandayam and Balaji Prabhakar",
title = "Traffic congestion: models, costs and optimal
transport",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "553--554",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592014",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop two models of highway traffic: (i) a
deterministic fluid model based on conservation laws
building on previous work and (ii) a mean-field model
of a series of infinite server queues, where each stage
in the tandem models a segment of highway. The models
define the ``highway-map''---a transformation of
time-varying arrival rate functions according to which
vehicles arrive at the highway to the corresponding
departure rate functions of vehicles exiting the
highway. The two models are shown to be equivalent in
that they obtain the same highway-map. The cost of
congestion for vehicles traversing the highway is the
total extra time they spend on the highway due to
congestion. This cost is shown to be equal to the
``d-bar'' distance between the input and the output
rate measures of the highway-map. This fact is used to
formulate a convex optimization problem for determining
the optimal way to shift users from peak to off-peak
hours using incentives so that congestion costs are
lowered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukhopadhyay:2014:RRS,
author = "Arpan Mukhopadhyay and Ravi R. Mazumdar",
title = "Randomized routing schemes for large processor sharing
systems with multiple service rates",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "555--556",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592015",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider randomized job routing techniques for a
system consisting of a large number of parallel
processor sharing servers with heterogeneous server
speeds. In particular, a scheme, that routes an
incoming job request to the server providing the
highest instantaneous processing rate per job among two
servers, chosen uniformly at random, is proposed. We
show that, unlike the homogeneous case, in the
heterogeneous case, such randomized dynamic schemes
need not always perform better than the optimal static
scheme (in which jobs are assigned to servers with
fixed probabilities independent of server states) in
terms of reducing the mean response time of jobs.
Specifically, we show that the stability region under
the proposed scheme is a subset of that under the
optimal static routing scheme. We also obtain the
stationary tail distribution of server occupancies for
the proposed scheme in the limit as the system size
grows to infinity. This distribution has been shown to
be insensitive to the job length distribution and to decay
super-exponentially.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tarvo:2014:AAM,
author = "Alexander Tarvo and Steven P. Reiss",
title = "Automated analysis of multithreaded programs for
performance modeling",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "557--558",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592016",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present an approach for building performance models
of multithreaded programs automatically. We use a
combination of static and dynamic analyses of a
single representative run of the program to build its
model. The model can predict performance of the program
under a variety of configurations. This paper outlines
how we construct the model and demonstrates how the
resultant models accurately predict the performance and
resource utilization of complex multithreaded
programs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arora:2014:CCP,
author = "Manish Arora and Srilatha Manne and Yasuko Eckert and
Indrani Paul and Nuwan Jayasena and Dean Tullsen",
title = "A comparison of core power gating strategies
implemented in modern hardware",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "559--560",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592017",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Idle power is a significant contributor to overall
energy consumption in modern multi-core processors.
Cores can enter a full-sleep state, also known as C6,
to reduce idle power; however, entering C6 incurs
performance and power overheads. Since power gating can
result in negative savings, hardware vendors implement
various algorithms to manage C6 entry. In this paper,
we examine state-of-the-art C6 entry algorithms and
present a comparative analysis in the context of
consumer and CPU-GPU benchmarks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ray:2014:TMN,
author = "Avik Ray and Sujay Sanghavi and Sanjay Shakkottai",
title = "Topic modeling from network spread",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "561--562",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592018",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Topic modeling refers to the task of inferring, only
from data, the abstract ``topics'' that occur in a
collection of content. In this paper we look at latent
topic modeling in a setting where unlike traditional
topic modeling (a) there are no/few features (like
words in documents) that are directly indicative of
content topics (e.g. un-annotated videos and images,
URLs etc.), but (b) users share and view content over a
social network. We provide a new algorithm for
inferring both the topics in which every user is
interested, and thus also the topics in each content
piece. We study its theoretical performance and
demonstrate its empirical effectiveness over standard
topic modeling algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mahmud:2014:BBC,
author = "A. Hasan Mahmud and Yuxiong He and Shaolei Ren",
title = "{BATS}: budget-constrained autoscaling for cloud
performance optimization",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "563--564",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592019",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ammar:2014:WYC,
author = "Ammar Ammar and Sewoong Oh and Devavrat Shah and Luis
Filipe Voloch",
title = "{What}'s your choice?: learning the mixed
multi-nomial",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "565--566",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592020",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computing a ranking over choices using consumer data
gathered from a heterogeneous population has become an
indispensable module for any modern consumer
information system, e.g. Yelp, Netflix, Amazon and
app-stores like Google play. In such applications, a
ranking or recommendation algorithm needs to extract
meaningful information from noisy data accurately and
in a scalable manner. A principled approach to resolve
this challenge requires a model that connects
observations to recommendation decisions and a
tractable inference algorithm utilizing this model. To
that end, we abstract the preference data generated by
consumers as noisy, partial realizations of their
innate preferences, i.e. orderings or permutations over
choices. Inspired by the seminal works of Samuelson
(cf. axiom of revealed preferences ) and that of
McFadden (cf. discrete choice models for
transportation), we model the population's innate
preferences as a mixture of the so called Multinomial
Logit (MMNL) model. Under this model, the
recommendation problem boils down to (a) learning the
MMNL model from population data, (b) finding am MNL
component within the mixture that closely represents
the revealed preferences of the consumer at hand, and
(c) recommending other choices to her/him that are
ranked high according to thus found component. In this
work, we address the problem of learning MMNL model
from partial preferences. We identify fundamental
limitations of any algorithm to learn such a model as
well as provide conditions under which, a simple,
data-driven (non-parametric) algorithm learns the model
effectively. The proposed algorithm has a pleasant
similarity to the standard collaborative filtering for
scalar (or star) ratings, but in the domain of
permutations. This work advances the state-of-art in
the domain of learning distribution over permutations
(cf. [2]) as well as in the context of learning mixture
distributions (cf. [4]).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shafiq:2014:RCC,
author = "Muhammad Zubair Shafiq and Alex X. Liu and Amir R.
Khakpour",
title = "Revisiting caching in content delivery networks",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "567--568",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592021",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Content Delivery Networks (CDNs) differ from other
caching systems in terms of both workload
characteristics and performance metrics. However, there
has been little prior work on large-scale measurement
and characterization of content requests and caching
performance in CDNs. For workload characteristics, CDNs
deal with extremely large content volume, high content
diversity, and strong temporal dynamics. For
performance metrics, other than hit ratio, CDNs also
need to minimize the disk operations and the volume of
traffic from origin servers. In this paper, we conduct
a large-scale measurement study to characterize the
content request patterns using real-world data from a
commercial CDN provider.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2014:FSL,
author = "Qiang Xu and Thomas Andrews and Yong Liao and
Stanislav Miskovic and Z. Morley Mao and Mario Baldi
and Antonio Nucci",
title = "{FLOWR}: a self-learning system for classifying
mobile-application traffic",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "569--570",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592022",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dong:2014:ART,
author = "Wei Dong and Xuefeng Zhang and Jiliang Wang and Yi Gao
and Chun Chen and Jiajun Bu",
title = "Accurate and robust time reconstruction for deployed
sensor networks",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "571--572",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592023",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The notion of global time is of great importance for
many sensor network applications. To achieve
microsecond accuracy, MAC-level timestamping is
required for recording packet transmission and
reception times. The MAC-level timestamps, however, are
known to be error-prone, especially with low power
listening techniques. In this paper, we propose ART, an
accurate and robust time reconstruction approach to
detecting invalid timestamps and recovering the needed
information. We evaluate our approach in both testbed
and a real-world deployment. Results show ART is
accurate and robust for deployed sensor networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2014:EPS,
author = "Shaoquan Zhang and Longbo Huang and Minghua Chen and
Xin Liu",
title = "Effect of proactive serving on user delay reduction in
service systems",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "573--574",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592024",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In online service systems, delay experienced by a user
from the service request to the service completion is
one of the most critical performance metrics. To
improve user delay experience, in this paper, we
investigate a novel aspect of system design: proactive
serving, where the system can predict future user
request arrivals and allocate its capacity to serve
these upcoming requests proactively. In particular, we
investigate the average user delay under proactive
serving from a queuing theory perspective. We show that
proactive serving reduces the average user delay
exponentially (as a function of the prediction window
size) under M/M/1 queueing models. Our simulation
results show that, for G/G/1 queueing models, the
average user delay also decreases significantly under
proactive serving.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
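The Zhang et al. abstract above reports an exponential delay reduction in the prediction window under M/M/1 assumptions. As a rough, hypothetical companion (not the paper's model), the sketch below simulates a single FCFS server in which every request is revealed `window` time units before its true arrival and may enter service early; the function name avg_delay and all parameter values are invented for illustration.

import random

def avg_delay(lam=0.8, mu=1.0, window=0.0, n=200_000, seed=1):
    """Mean delay in a single-server FCFS queue with simple proactive serving.

    Each request is revealed `window` time units before its true arrival and
    may enter service that early; delay is max(0, completion - true arrival).
    Implemented as a Lindley-style recursion over effective arrival times.
    """
    rng = random.Random(seed)
    t = 0.0            # true arrival time of the current request
    prev_done = 0.0    # completion time of the previous request
    total = 0.0
    for _ in range(n):
        t += rng.expovariate(lam)             # Poisson arrivals
        service = rng.expovariate(mu)         # exponential service times
        start = max(t - window, prev_done)    # may start before the arrival
        prev_done = start + service
        total += max(0.0, prev_done - t)
    return total / n

if __name__ == "__main__":
    for w in (0.0, 1.0, 2.0, 4.0):
        print(f"window={w}: mean delay ~ {avg_delay(window=w):.3f}")

With window=0 this reduces to a plain M/M/1 FCFS sojourn time (about 5 for the default rates), and the estimated delay shrinks quickly as the window grows, in the spirit of the abstract's claim.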
@Article{Kong:2014:OES,
author = "Fanxin Kong and Xue Liu and Lei Rao",
title = "Optimal energy source selection and capacity planning
for green datacenters",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "575--576",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592025",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To reduce cost and emission, modern datacenter
operators are beginning to incorporate green energy
sources into datacenters' power supply. To improve
service availability, they also back up datacenters
using traditional (usually brown) energy sources.
However, challenges arise due to the distinct
characteristics of energy sources used for different
goals. How to select optimal energy sources and plan
their capacity for datacenters to meet cost, emission,
and service availability requirements remains an open
research problem. In this extended abstract, we briefly
describe recent work in [4], which provides a holistic
solution to address this problem. In [4], we present
GreenPlanning, a framework to strike a judicious
balance among multiple energy sources, the electrical
grid and energy storage devices for a datacenter in
terms of cost, emission, and service availability.
GreenPlanning explores different features and
operations of both green and traditional energy sources
available to datacenters. The framework minimizes the
lifetime total cost including both capital and
operational cost for a datacenter. We conduct extensive
experiments to evaluate GreenPlanning with real-life
computational workload and meteorological data traces.
Results demonstrate that GreenPlanning can reduce the
lifetime total cost and emission by more than 50\%
compared to traditional configurations without
integration of green energy, while still meeting
service availability requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shin:2014:SUI,
author = "Jinwoo Shin and Tonghoon Suk",
title = "Scheduling using interactive oracles: connection
between iterative optimization and low-complexity
scheduling",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "577--578",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592026",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Since Tassiulas and Ephremides proposed the maximum
weight scheduling algorithm of throughput-optimality
for constrained queueing networks in 1992, extensive
research efforts have been made for resolving its high
complexity issue under various directions. In this
paper, we resolve this issue by developing a generic
framework for designing throughput-optimal and
low-complexity scheduling algorithms. Under the
framework, an algorithm updates current schedules via
an interaction with a given oracle system that
generates a solution of a certain discrete optimization
problem in a finite number of interactive queries. The
complexity of the resulting algorithm is decided by the
number of operations required for an oracle processing
a single query, which is typically very small. Somewhat
surprisingly, we prove that an algorithm using any such
oracle is throughput-optimal for general constrained
queueing network models that arise in the context of
emerging large-scale communication networks. To the
best of our knowledge, our result is the first that
establishes a rigorous connection between iterative
optimization methods and low-complexity scheduling
algorithms, which we believe provides various future
directions and new insights in both areas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rallapalli:2014:ULF,
author = "Swati Rallapalli and Wei Dong and Lili Qiu and Yin
Zhang",
title = "Unified localization framework using trajectory
signatures",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "579--580",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592027",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop a novel trajectory-based localization
scheme which (i) identifies a user's current trajectory
based on the measurements collected while the user is
moving, by finding the best match among the training
traces (trajectory matching) and then (ii) localizes
the user on the trajectory (localization). The core
requirement of both the steps is an accurate and robust
algorithm to match two time-series that may contain
significant noise and perturbation due to differences
in mobility, devices, and environments. To achieve
this, we develop an enhanced Dynamic Time Warping (DTW)
alignment, and apply it to RSS, channel state
information, or magnetic field measurements collected
from a trajectory. We use indoor and outdoor
experiments to demonstrate its effectiveness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
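The Rallapalli et al. abstract above relies on an enhanced Dynamic Time Warping alignment; that enhancement is not described in this entry, so the sketch below shows only the classic textbook DTW distance between two scalar traces, as a baseline for what the paper improves on. The function name dtw_distance is illustrative.

def dtw_distance(a, b):
    """Classic Dynamic Time Warping distance between two scalar time series.

    Returns the minimum cumulative |a[i]-b[j]| cost over monotone alignments.
    O(len(a)*len(b)) time, O(len(b)) memory.
    """
    inf = float("inf")
    prev = [0.0] + [inf] * len(b)
    for x in a:
        cur = [inf] * (len(b) + 1)
        for j, y in enumerate(b, start=1):
            cost = abs(x - y)
            cur[j] = cost + min(prev[j],      # step in a only
                                cur[j - 1],   # step in b only
                                prev[j - 1])  # match both
        prev = cur
    return prev[len(b)]

# Two similar traces with a time shift still align at zero cost.
print(dtw_distance([0, 1, 2, 3, 2, 1], [0, 0, 1, 2, 3, 2, 1]))  # 0.0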
@Article{Kang:2014:TCT,
author = "Dong Hyun Kang and Changwoo Min and Young Ik Eom",
title = "{TS-CLOCK}: temporal and spatial locality aware buffer
replacement algorithm for {NAND} flash storages",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "581--582",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "NAND flash storage is widely adopted in all classes of
computing devices. However, random write performance
and lifetime issues remain to be addressed. In this
paper, we propose a novel buffer replacement algorithm
called TS-CLOCK that effectively resolves the remaining
problems. Our experimental results show that TS-CLOCK
outperforms state-of-the-art algorithms in terms of
performance and lifetime.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2014:MSM,
author = "Juhoon Kim and Yung-Chih Chen and Ramin Khalili and
Don Towsley and Anja Feldmann",
title = "Multi-source multipath {HTTP (mHTTP)}: a proposal",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "583--584",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592029",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today, most devices have multiple network interfaces.
Coupled with wide-spread replication of popular content
at multiple locations, this provides substantial path
diversity in the Internet. We propose Multi-source
Multipath HTTP, mHTTP, which takes advantage of all
existing types of path diversity in the Internet. mHTTP
needs only client-side but not server-side or network
modifications as it is a receiver-oriented mechanism.
Moreover, the modifications are restricted to the
socket interface. Thus, no changes are needed to the
applications or to the kernel.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vlachou:2014:PAM,
author = "Christina Vlachou and Albert Banchs and Julien Herzen
and Patrick Thiran",
title = "Performance analysis of {MAC} for power-line
communications",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "585--586",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate the IEEE 1901 MAC protocol, the
dominant protocol for high data rate power-line
communications. 1901 employs a CSMA/CA mechanism
similar to --- but much more complex than --- the
backoff mechanism of 802.11. Because of this extra
complexity, and although this mechanism is the only
widely used MAC layer for power-line networks, there
are few analytical results on its performance. We
propose a model for the 1901 MAC that comes in the form
of a single fixed-point equation for the collision
probability. We prove that this equation admits a
unique solution, and we evaluate the accuracy of our
model by using simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vu:2014:IDC,
author = "Long Vu and Deepak Turaga and Srinivasan
Parthasarathy",
title = "Impact of {DHCP} churn on network characterization",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "587--588",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate the DHCP churn impact on network
characterization by analyzing 18 months of DHCP, DNS,
Firewall Alert, and Netflow data collected from an
enterprise network of 30,000 clients. We find that DHCP
churn has a clear impact on network metrics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Guo:2014:OAJ,
author = "Yang Guo and Alexander L. Stolyar and Anwar Walid",
title = "Online algorithms for joint
application-{VM}-physical-machine auto-scaling in a
cloud",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "589--590",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592035",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop shadow routing based online algorithms for
the joint problem of application-to-VM and VM-to-PM
assignments in a cloud environment. The asymptotic
optimality of the shadow algorithm is proved and the
performance is evaluated by simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2014:DOL,
author = "Jia Liu and Cathy H. Xia and Ness B. Shroff and Hanif
D. Sherali",
title = "Distributed optimal load shedding for disaster
recovery in smart electric power grids: a second-order
approach",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "591--592",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592036",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the problem of distributed
load shedding optimization for disaster recovery in
smart grids. We develop distributed second-order
interior-point based load shedding algorithms that
enjoy a fast quadratic convergence rate. Our main
contributions are two-fold: (i) We propose a rooted
spanning tree based reformulation that enables our
distributed algorithm design; (ii) Based on the
spanning tree reformulation, we design distributed
computation schemes for our proposed second-order
interior-point based load shedding. Collectively, these
results serve as an important first step in load
shedding and disaster recovery that uses second-order
distributed techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clegg:2014:TSS,
author = "Richard G. Clegg and Raul Landa and Jo{\~a}o Taveira
Ara{\'u}jo and Eleni Mykoniati and David Griffin and
Miguel Rio",
title = "{TARDIS}: stably shifting traffic in space and time",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "593--594",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592037",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes TARDIS (Traffic Assignment and
Retiming Dynamics with Inherent Stability) which is an
algorithmic procedure designed to reallocate traffic
within Internet Service Provider (ISP) networks. Recent
work has investigated the idea of shifting traffic in
time (from peak to off-peak) or in space (by using
different links). This work gives a unified scheme for
both time and space shifting to reduce costs.
Particular attention is given to the commonly used 95th
percentile pricing scheme. The work has three main
innovations: firstly, introducing the Shapley Gradient,
a way of comparing traffic pricing between different
links at different times of day; secondly, a unified
way of reallocating traffic in time and/or in space;
thirdly, a continuous approximation to this system is
proved to be stable. A trace-driven investigation using
data from two service providers shows that the
algorithm can create large savings in transit costs
even when only small proportions of the traffic can be
shifted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berger:2014:EAT,
author = "Daniel S. Berger and Philipp Gland and Sahil Singla
and Florin Ciucu",
title = "Exact analysis of {TTL} cache networks: the case of
caching policies driven by stopping times",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "595--596",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TTL caching models have recently regained significant
research interest, largely due to their ability to fit
popular caching policies such as LRU. In this extended
abstract we briefly describe our recent work on two
exact methods to analyze TTL cache networks. The first
method generalizes existing results for line networks
under renewal requests to the broad class of caching
policies whereby evictions are driven by stopping
times. The obtained results are further generalized,
using the second method, to feedforward networks with
Markov arrival processes (MAP) requests. MAPs are
particularly suitable for non-line networks because
they are closed not only under superposition and
splitting, as known, but also under input-output
caching operations as proven herein for phase-type TTL
distributions. The crucial benefit of the two closure
properties is that they jointly enable the first exact
analysis of feedforward networks of TTL caches in great
generality.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
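For a rough feel of the single-cache case underlying the Berger et al. analysis above: under Poisson requests at rate lambda, a non-reset TTL cache with timer T has the well-known hit ratio lambda*T/(1 + lambda*T). The Monte Carlo check below is only an illustrative sketch of that special case, not the paper's method (which treats networks of caches and general stopping-time policies); the function name ttl_hit_ratio is invented.

import random

def ttl_hit_ratio(lam=1.0, ttl=2.0, n_requests=500_000, seed=7):
    """Monte Carlo hit ratio of one non-reset TTL cache under Poisson requests.

    On a miss the object is (re)inserted and expires `ttl` later; hits do not
    refresh the timer.  Compare with the closed form lam*ttl/(1 + lam*ttl).
    """
    rng = random.Random(seed)
    t, expires, hits = 0.0, -1.0, 0
    for _ in range(n_requests):
        t += rng.expovariate(lam)
        if t < expires:
            hits += 1
        else:
            expires = t + ttl   # miss: cache again with a fresh TTL
    return hits / n_requests

print(ttl_hit_ratio(), 2.0 / 3.0)   # simulation vs. the closed form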
@Article{Jyothi:2014:MTD,
author = "Sangeetha Abdu Jyothi and Ankit Singla and P. Brighten
Godfrey and Alexandra Kolla",
title = "Measuring throughput of data center network
topologies",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "597--598",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592040",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "High throughput is a fundamental goal of network
design. While myriad network topologies have been
proposed to meet this goal, particularly in data center
and HPC networking, a consistent and accurate method of
evaluating a design's throughput performance and
comparing it to past proposals is conspicuously absent.
In this work, we develop a framework to benchmark the
throughput of network topologies and apply this
methodology to reveal insights about network structure.
We show that despite being commonly used, cut-based
metrics such as bisection bandwidth are the wrong
metrics: they yield incorrect conclusions about the
throughput performance of networks. We therefore
measure flow-based throughput directly and show how to
evaluate topologies with nearly-worst-case traffic
matrices. We use the flow-based throughput metric to
compare the throughput performance of a variety of
computer networks. We have made our evaluation
framework freely available to facilitate future work on
design and evaluation of networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2014:ETR,
author = "Da Wang and Gauri Joshi and Gregory Wornell",
title = "Efficient task replication for fast response times in
parallel computation",
journal = j-SIGMETRICS,
volume = "42",
number = "1",
pages = "599--600",
month = jun,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2637364.2592042",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 27 06:38:48 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Large-scale distributed computing systems divide a job
into many independent tasks and run them in parallel on
different machines. A challenge in such parallel
computing is that the time taken by a machine to
execute a task is inherently variable, and thus the
slowest machine becomes the bottleneck in the
completion of the job. One way to combat the
variability in machine response is to replicate tasks
on multiple machines and wait for the machine that
finishes first. While task replication reduces response
time, it generally increases resource usage. In this
work, we propose a theoretical framework to analyze the
trade-off between response time and resource usage.
Given an execution time distribution for machines, our
analysis gives insights on when and why replication
helps. We also propose efficient scheduling algorithms
for large-scale distributed computing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
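The Wang et al. abstract above studies the response-time versus resource-usage trade-off of task replication. The sketch below is a minimal, assumed illustration (shifted-exponential execution times, no cancellation of slow replicas, invented function name replication_tradeoff), not the paper's analytical framework.

import random
import statistics

def replication_tradeoff(r, n=100_000, seed=3):
    """Response time vs. resource use when a task runs on r machines at once.

    Execution times are i.i.d. shifted-exponential (1 + Exp(1)); the task
    finishes when the fastest replica finishes.  Resource use naively counts
    total machine time, assuming slow replicas are not cancelled.
    """
    rng = random.Random(seed)
    resp, usage = [], []
    for _ in range(n):
        times = [1.0 + rng.expovariate(1.0) for _ in range(r)]
        resp.append(min(times))
        usage.append(sum(times))
    return statistics.mean(resp), statistics.mean(usage)

for r in (1, 2, 4):
    t, u = replication_tradeoff(r)
    print(f"r={r}: mean response ~ {t:.2f}, mean machine-time ~ {u:.2f}")

The mean response drops roughly as 1 + 1/r while machine time grows linearly in r, which is the kind of trade-off the abstract analyzes.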
@Article{Buchholz:2014:JLC,
author = "Peter Buchholz and Benny {Van Houdt}",
title = "Joint latency and cost optimization for erasure-coded
data center storage",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "3--14",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667524",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern distributed storage systems offer large
capacity to satisfy the exponentially increasing need
for storage space. They often use erasure codes to
protect against disk and node failures to increase
reliability, while trying to meet the latency
requirements of the applications and clients. This
paper provides an insightful upper bound on the average
service delay of such erasure-coded storage with
arbitrary service time distribution and consisting of
multiple heterogeneous files. Not only does the result
supersede known delay bounds that only work for
homogeneous files, it also enables a novel problem of
joint latency and storage cost minimization over three
dimensions: selecting the erasure code, placement of
encoded chunks, and optimizing scheduling policy. The
problem is efficiently solved via the computation of a
sequence of convex approximations with provable
convergence. We further prototype our solution in an
open-source, cloud storage deployment over three
geographically distributed data centers. Experimental
results validate our theoretical delay analysis and
show significant latency reduction, providing valuable
insights into the proposed latency-cost tradeoff in
erasure-coded storage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2014:RPS,
author = "Bo Zhang and Guodong Pang and Bert Zwart",
title = "Refining piecewise stationary approximation for a
{Markov}-regulated fluid queue",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "15--17",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667526",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a refinement of the Piecewise
Stationary Approximation for the stationary
distribution of a Markov-regulated fluid queue. The
refinement is analytically justified. Its accuracy and
utility are demonstrated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Izagirre:2014:LTP,
author = "A. Izagirre and A. M. Makowski",
title = "Light traffic performance under the power of two load
balancing strategy: the case of server heterogeneity",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "18--20",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667527",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the power-of-two policy with d = 2,
Poisson job arrivals, heterogeneous servers and a
general job requirement distribution. With the help of
the first two light traffic derivatives for the average
job response time, we point to interesting structural
features associated with server heterogeneity in light
traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
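As an illustrative companion to the Izagirre and Makowski abstract above, the following is a small Gillespie-style simulation of the power-of-two policy with heterogeneous exponential servers: each arrival samples two servers and joins the one with the shorter queue. The rates, tie-breaking rule, and function name power_of_two_sim are assumptions for the sketch; the paper's light-traffic derivatives are not reproduced.

import random

def power_of_two_sim(lam=1.5, mus=(1.0, 2.0), horizon=200_000.0, seed=11):
    """Power-of-two-choices with heterogeneous exponential servers.

    Poisson(lam) arrivals each sample d=2 servers uniformly at random and join
    the one with fewer jobs.  Returns the time-average number in the system.
    """
    rng = random.Random(seed)
    q = [0] * len(mus)          # queue lengths (including job in service)
    t, area = 0.0, 0.0
    while t < horizon:
        rates = [lam] + [mu if n > 0 else 0.0 for mu, n in zip(mus, q)]
        total = sum(rates)
        dt = rng.expovariate(total)
        area += sum(q) * dt
        t += dt
        u = rng.random() * total
        if u < lam:                              # arrival: sample two servers
            i, j = rng.sample(range(len(mus)), 2)
            q[min(i, j, key=lambda k: q[k])] += 1
        else:                                    # departure from a busy server
            u -= lam
            for k, r in enumerate(rates[1:]):
                if u < r:
                    q[k] -= 1
                    break
                u -= r
    return area / t

print("mean number in system ~", round(power_of_two_sim(), 3))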
@Article{Shioda:2014:RWB,
author = "Shigeo Shioda",
title = "Random walk based biased sampling for data collection
on communication networks",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "21--23",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667528",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sampling via random walks is the first choice for
collecting random samples of online social networks,
peer-to-peer networks, and the World Wide Web. This
paper proposes an algorithm for random-walk sampling,
which allows us to collect a biased (non-random)
sample, depending on which nodes are to be investigated
in detail. Since the stationary distribution of a
random walker under the proposed algorithm can be
analytically derived, the bias involved in a collected
sample can be removed using the notion of change of
measure in probability theory, which is also presented
in this paper. The effectiveness of the proposals is
verified using simulation experiments based on the data
of real networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
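The change-of-measure idea in the Shioda abstract above can be seen in its simplest textbook form: a simple random walk visits nodes in proportion to their degree, so weighting each visited node by 1/deg removes that bias. The sketch below shows only this standard correction, not the paper's designed biased-sampling algorithm; rw_estimate_mean and the toy graph are illustrative.

import random

def rw_estimate_mean(graph, values, steps=200_000, seed=5):
    """Estimate the plain node-average of `values` from a simple random walk.

    The stationary probability of node v under a simple random walk is
    proportional to deg(v), so weighting each visited node by 1/deg(v)
    (a change of measure) removes the degree bias from the sample average.
    """
    rng = random.Random(seed)
    v = next(iter(graph))
    num = den = 0.0
    for _ in range(steps):
        v = rng.choice(graph[v])          # move to a uniform random neighbour
        w = 1.0 / len(graph[v])           # 1/deg importance weight
        num += w * values[v]
        den += w
    return num / den

# Star graph: hub 0 with leaves 1..4; the hub carries a different value.
g = {0: [1, 2, 3, 4], 1: [0], 2: [0], 3: [0], 4: [0]}
vals = {0: 10.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0}
print(rw_estimate_mean(g, vals))   # ~ 2.0, the unweighted node average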
@Article{Haddad:2014:SEE,
author = "Majed Haddad and Oussama Habachi and Piotr Wiecek and
Yezekael Hayel",
title = "Spectral efficiency of energy efficient multicarrier
systems",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "24--26",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667529",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate the achievable performances of
multi-carrier energy efficient power control game. Both
the simultaneous-move and the hierarchical games are
addressed. For the first time, we derive analytical
closed-form expressions of the spectrum coordination
and the spectral efficiency of such models. Our results
indicate that the spectrum coordination capability
induced by the power control game model enables the
wireless network to enjoy the energy efficiency
improvement while still achieving a high spectral
efficiency. Such an important result offers insights
into how to design power control in multi-carrier radio
environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2014:MCI,
author = "Tao Zhang and Guangshuo Chen and Wei Shu and Min-You
Wu",
title = "Microarchitectural characterization of irregular
applications on {GPGPUs}",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "27--29",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667530",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, GPGPUs have experienced tremendous
growth as general-purpose and high-throughput computing
devices. However, irregular applications cannot fully
utilize the hardware resources because of their many
control-flow divergences, irregular memory accesses,
and load imbalances. The lack of in-depth
characterization quantifying the ways in which
irregular applications differ from regular ones on
GPGPUs has prevented users from making effective use
of the hardware. We examine a suite of
representative irregular applications on a
cycle-accurate GPU simulator. We characterize their
performance aspects and analyze the bottlenecks. We
also assess the impact of changes in cache, DRAM and
interconnect and discuss the implications for GPU
architecture design. This work is useful in
understanding and optimizing irregular applications on
GPUs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nair:2014:CPC,
author = "Jayakrishnan Nair and Vijay G. Subramanian and Adam
Wierman",
title = "On competitive provisioning of cloud services",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "30--32",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667531",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by cloud services, we consider the interplay
of network effects, congestion, and competition in
ad-supported services. We study the strategic
interactions between competing service providers and a
user base, modeling congestion sensitivity and two
forms of positive network effects: ``firm-specific''
versus ``industry-wide.'' Our analysis reveals that
users are generally no better off due to the
competition in a marketplace of ad-supported services.
Further, our analysis highlights an important contrast
between firm-specific and industry-wide network
effects: firms can coexist in a marketplace with
industry-wide network effects, but near-monopolies tend
to emerge in marketplaces with firm-specific network
effects.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bosman:2014:PCT,
author = "Joost Bosman and Jayakrishnan Nair and Bert Zwart",
title = "On the probability of current and temperature
overloading in power grids: a large deviations
approach",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "33--35",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667532",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important role
in research studies to investigate fundamental issues
and trade-offs at the core of performance problems in
the design and implementation of complex computer
systems, networks and applications. On June 20, 2014,
the 16th Workshop on MAthematical performance Modeling
and Analysis (MAMA 2014) was held in Austin TX, USA,
sponsored by ACM SIGMETRICS, and held in conjunction
with SIGMETRICS 2014. This workshop seeks to bring
together researchers working on the mathematical,
methodological and theoretical aspects of performance
analysis, modeling and optimization. It is intended to
provide a forum at SIGMETRICS conferences for talks on
early research in the more mathematical areas of
computer performance analysis. These talks tend to be
based on very recent research results (including work
in progress) or on new research results that will be
otherwise submitted only to a journal (or recently have
been submitted to a journal). Thus, part of the goal is
to complement and supplement the SIGMETRICS Conference
program with such talks without removing any
theoretical contributions from the main technical
program. Furthermore, we continue to experience the
desired result of having abstracts from previous MAMA
workshops appear as full papers in the main program of
subsequent SIGMETRICS and related conferences. All
submissions were reviewed by at least 4 members of the
program committee, from which a total of 13 were
selected for presentation at the MAMA 2014 workshop.
This special issue of Performance Evaluation Review
includes extended abstracts relating to these
presentations (arranged in the order of their
presentation), which cover a wide range of topics in
the area of mathematical performance analysis, modeling
and optimization. The study of Gelenbe examines the
backlog of energy and of data packets in a sensor node
that harvests energy, computing the properties of
energy and data backlogs and discussing system
stability. Meyfroyt derives asymptotic results for the
coverage ratio under a specific class of spatial
stochastic models (Cooperative Sequential Adsorption)
and investigates the scalability of the Trickle
communication protocol algorithm. The study of Tune and
Roughan applies the principle of maximum entropy to
develop fast traffic matrix synthesis models, with the
future goal of developing realistic spatio-temporal
traffic matrices. Bradonji{\'c} et al. compare and
contrast the capacity, congestion and reliability
requirements for alternative connectivity models of
large-scale data centers relative to fat trees. The
study of Rochman et al. considers the problem of
resource placement in network applications, based on a
large-scale service faced with regionally distributed
demands for various resources in cloud computing. Xie
and Lui investigate the design and analysis of a rating
system and a mechanism to encourage users to
participate in crowdsourcing and to incentivize workers
to develop high-quality solutions. The study of Asadi
et al. formulates a general problem for the joint
per-user mode selection, connection activation and
resource scheduling of connections using both LTE and
WiFi resources within the context of device-to-device
communications. Zheng and Tan consider a nonconvex
joint rate and power control optimization to achieve
egalitarian fairness (max-min weighted fairness) in
wireless networks, exploiting the nonlinear
Perron--Frobenius theory and nonnegative matrix theory.
The study of Goldberg et al. derives an asymptotically
optimal control policy for a stochastic capacity
problem of dynamically matching supply resources and
uncertain demand, based on connections with lost-sales
inventory models. Ghaderi et al. investigate a dynamic
stochastic bin packing problem, analyzing the fluid
limits of the system under an asymptotic best-fit
algorithm and showing it asymptotically minimizes the
number of servers used in steady state. The study of
Tizghadam and Leon-Garcia examines the impact of
overlaying or removing a subgraph on the Moore--Penrose
inverse of the Laplacian matrix of an existing network
topology and proposes an iterative method to find key
performance measures. Miyazawa considers a two-node
generalized Jackson network in a phase-type setting as
a special case of a Markov-modulated two-dimensional
reflecting random walk and analyzes the tail
asymptotics for this reflecting process. The study of
Squillante et al. investigates improvement in
scalability of search in networks through the use of
multiple random walks, deriving bounds on the hitting
time to a set of nodes and on various performance
metrics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:2014:SNE,
author = "Erol Gelenbe",
title = "A sensor node with energy harvesting",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "37--39",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meyfroyt:2014:CSA,
author = "Thomas M. M. Meyfroyt",
title = "A cooperative sequential adsorption model for wireless
gossiping",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "40--42",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667535",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wireless sensor networks require communication
protocols for efficiently maintaining data in a
distributed fashion. The Trickle algorithm is a popular
protocol serving as the basis for many of the current
standard communication protocols. In this paper we
develop a mathematical model describing how Trickle
maintains data and establish a relationship with a class
of spatial stochastic models known as Cooperative
Sequential Adsorption (CSA). We derive asymptotic
results for the coverage ratio for a specific class of
CSA models and investigate the scalability of the
Trickle algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tune:2014:MET,
author = "Paul Tune and Matthew Roughan",
title = "Maximum entropy traffic matrix synthesis",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "43--45",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The traffic matrix (TM) is an important input in
traffic engineering and network design. However, the
design of current synthesis models of TMs has been
rather ad hoc, and does not necessarily conform to
observed traffic constraints. We apply the principle of
maximum entropy to develop fast TM synthesis models,
with the future goal of developing realistic
spatio-temporal TMs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
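When the only observed constraints are the per-node row and column totals, the maximum-entropy traffic matrix reduces to the classical gravity (independence) model. The toy sketch below illustrates that special case only; the richer constraint sets considered by Tune and Roughan are not reproduced, and gravity_tm is an invented name.

def gravity_tm(row_sums, col_sums):
    """Maximum-entropy traffic matrix when only row and column sums are known.

    With those marginal constraints alone, the max-entropy solution is the
    independence ("gravity") model: T[i][j] = row_sums[i]*col_sums[j]/total.
    Assumes sum(row_sums) == sum(col_sums).
    """
    total = float(sum(row_sums))
    return [[r * c / total for c in col_sums] for r in row_sums]

for row in gravity_tm([60, 30, 10], [50, 25, 25]):
    print([round(x, 1) for x in row])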
@Article{Bradonjic:2014:SCR,
author = "Milan Bradonji{\'c} and Iraj Saniee and Indra
Widjaja",
title = "Scaling of capacity and reliability in data center
networks",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "46--48",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The traditional connectivity model within the data
center is that of a hierarchical tree with redundant
connections (``fat tree'') and with a top node
consisting of one or more routers that bring in (and
send out completed) requests for processing. In this
paper we examine alternative connectivity models for
large-scale data centers. In the first model, we
examine hypergrids as the structure connecting switches
and routers to the edge server racks. In the second
model, we examine random graphs as the interconnecting
network. We compare and contrast the capacity,
congestion and reliability requirements for these
relative to fat-trees. We show that, as the system size
increases and for uniform switch-end-to-switch-end
demand, the fat-tree configuration emerges as an
expensive option demanding higher port density switches
but has low congestion and high reliability. In
contrast, the random graph model shows the same low
level of congestion, lower cost due to fewer ports and
reasonable reliability, whereas the hypergrid model
does not require scaling of switch ports, provides high
reliability but exhibits higher congestion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rochman:2014:ERP,
author = "Yuval Rochman and Hanoch Levy and Eli Brosh",
title = "Efficient resource placement in cloud computing and
network applications",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "49--51",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We address the problem of resource placement in
general networking applications, in particular cloud
computing. We consider a large-scale service faced by
regionally distributed demands for various resources.
The service aims at placing the resources across
regions to maximize profit, accounting for demand
granting revenues minus resource placement costs. Cloud
computing and online services, utilizing regional
datacenters and facing the problem of where and how
much to place various servers, naturally fall under
this paradigm. The main challenge posed by this setting
is the need to deal with arbitrary multi-dimensional
stochastic demands. We show that, despite the
challenging stochastic combinatorial complexity, one
can optimize the system operation using fairly
efficient algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xie:2014:MCS,
author = "Hong Xie and John C. S. Lui",
title = "Modeling crowdsourcing systems: design and analysis of
incentive mechanism and rating system",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "52--54",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Asadi:2014:MDC,
author = "Arash Asadi and Peter Jacko and Vincenzo Mancuso",
title = "Modeling {D2D} communications with {LTE} and {WiFi}",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "55--57",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667540",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we propose a roadmap towards the
analytical understanding of Device-to-Device (D2D)
communications in LTE-A networks. Various D2D solutions
have been proposed, which include inband and outband
D2D transmission modes, each of which exhibits
different pros and cons in terms of complexity,
interference, and spectral efficiency achieved. We go
beyond traditional mode optimization and mode-selection
schemes. Specifically, we formulate a general problem
for the joint per-user mode selection, connection
activation and resource scheduling of connections using
both LTE and WiFi resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zheng:2014:EFF,
author = "Liang Zheng and Chee Wei Tan",
title = "Egalitarian fairness framework for joint rate and
power optimization in wireless networks",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "58--60",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "How do we efficiently and fairly allocate the resource
in a wireless network? We study a joint rate and power
control optimization to achieve egalitarian fairness
(max-min weighted fairness) in multiuser wireless
networks. The key challenge to optimizing the fairness
of maximizing the data rates for all the users is the
nonconvexity of the problem. We exploit the
nonlinear Perron--Frobenius theory and nonnegative
matrix theory to solve this nonconvex resource control
problem. A fixed-point algorithm that resembles a
nonlinear version of the Power Method in linear algebra
and converges very fast to the optimal solution is also
proposed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
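The Zheng and Tan abstract above describes a fixed-point algorithm resembling a nonlinear Power Method. As a hedged point of reference only, the sketch below shows the classical linear power iteration for the Perron eigenvalue of a nonnegative matrix; it is not the paper's nonlinear algorithm, and power_method is an illustrative name.

import numpy as np

def power_method(mat, iters=200):
    """Classical power iteration for a nonnegative matrix.

    Returns an estimate of the Perron (spectral) radius and the associated
    nonnegative eigenvector, normalized to unit 1-norm.
    """
    mat = np.asarray(mat, dtype=float)
    x = np.ones(mat.shape[0])
    for _ in range(iters):
        y = mat @ x
        x = y / np.linalg.norm(y, 1)
    return float(np.linalg.norm(mat @ x, 1)), x

rho, vec = power_method([[0.0, 2.0], [1.0, 1.0]])
print(round(rho, 4), vec)    # Perron root 2 with eigenvector (0.5, 0.5)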
@Article{Goldberg:2014:AOC,
author = "D. A. Goldberg and D. A. Katz and Y. Lu and M. Sharma
and M. S. Squillante",
title = "Asymptotic optimality of constant capacity allocation
policies for dynamic resource planning",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "61--63",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghaderi:2014:AOB,
author = "Javad Ghaderi and Yuan Zhong and R. Srikant",
title = "Asymptotic optimality of {BestFit} for stochastic bin
packing",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "64--66",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the static bin packing problem, items of different
sizes must be packed into bins or servers with unit
capacity in a way that minimizes the number of bins
used, and it is well-known to be a hard combinatorial
problem. Best-Fit is among the simplest online
heuristics for this problem. Motivated by the problem
of packing virtual machines in servers in the cloud, we
consider the dynamic version of this problem, when jobs
arrive randomly over time and leave the system after
completion of their service. We analyze the fluid limits
of the system under an asymptotic Best-Fit algorithm
and show that it asymptotically minimizes the number of
servers used in steady state (on the fluid scale). The
significance of the result is due to the fact that
Best-Fit seems to achieve the best performance in
practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
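For readers unfamiliar with the heuristic named in the Ghaderi et al. abstract above, a plain static Best-Fit packing loop looks as follows. This is the textbook version, not the dynamic arrival-and-departure model analyzed in the paper; best_fit and the sample items are illustrative.

def best_fit(items, capacity=1.0):
    """Online Best-Fit bin packing.

    Each item goes into the open bin with the least remaining space that can
    still hold it; a new bin is opened only if no open bin fits.
    Returns the list of per-bin free space (its length is the bin count).
    """
    free = []                                   # remaining capacity per bin
    for size in items:
        candidates = [i for i, f in enumerate(free) if f >= size]
        if candidates:
            i = min(candidates, key=lambda i: free[i])   # tightest fit
            free[i] -= size
        else:
            free.append(capacity - size)        # open a new bin
    return free

bins = best_fit([0.5, 0.7, 0.5, 0.2, 0.4, 0.2, 0.1])
print(len(bins), "bins used")   # 3 bins for this sequence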
@Article{Tizghadam:2014:ISI,
author = "Ali Tizghadam and Alberto Leon-Garcia",
title = "On the impact of subgraph insertion or removal on
{Moore--Penrose Laplacian} and resistance distance",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "67--69",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A large body of network-related problems can be
formulated or explained by Moore--Penrose inverse of
the graph Laplacian matrix of the network. This paper
studies the impact of overlaying or removing a subgraph
(inserting / removing a group of links, or modifying a
set of link weights) on Moore--Penrose inverse of the
Laplacian matrix of an existing network topology.
Moreover, an iterative method is proposed to find
point-to-point resistance distance (effective
resistance) and network criticality of a graph as key
performance measures to study the robustness of a
network at the presence of subgraph insertion and/or
subgraph removal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
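The key quantity in the Tizghadam and Leon-Garcia abstract above, point-to-point resistance distance, can be read directly off the Moore--Penrose inverse of the graph Laplacian as R(u,v) = L+[u,u] + L+[v,v] - 2*L+[u,v]. The numpy sketch below computes it from scratch; the paper's incremental update under subgraph insertion or removal is not reproduced, and resistance_distance is an illustrative name.

import numpy as np

def resistance_distance(adj, u, v):
    """Effective resistance between u and v via the Laplacian pseudoinverse.

    `adj` is a symmetric weighted adjacency matrix; the graph Laplacian is
    L = diag(degrees) - adj, and R(u,v) = L+[u,u] + L+[v,v] - 2*L+[u,v].
    """
    adj = np.asarray(adj, dtype=float)
    lap = np.diag(adj.sum(axis=1)) - adj
    lp = np.linalg.pinv(lap)                 # Moore--Penrose inverse
    return lp[u, u] + lp[v, v] - 2.0 * lp[u, v]

triangle = [[0, 1, 1],
            [1, 0, 1],
            [1, 1, 0]]
print(resistance_distance(triangle, 0, 1))   # 2/3 for a unit-weight triangle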
@Article{Miyazawa:2014:TAS,
author = "Masakiyo Miyazawa",
title = "Tail asymptotics of the stationary distribution for a
two-node generalized {Jackson} network",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "70--72",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2014:ISS,
author = "Mark S. Squillante and Don Towsley and Sean Barker",
title = "Improving the scalability of search in networks
through multiple random walks",
journal = j-SIGMETRICS,
volume = "42",
number = "2",
pages = "73--75",
month = sep,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2667522.2667546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 8 08:49:53 MDT 2014",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chuang:2014:JWP,
author = "John Chuang and Patrick Loiseau",
title = "The {Joint Workshop on Pricing and Incentives in
Networks and Systems (WPIN+NetEcon 2014)}",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "2--3",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695535",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kamble:2014:SMP,
author = "Vijay Kamble and Jean Walrand",
title = "Strategy-proof Mechanisms for Purchasing a Shared
Resource",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "4--7",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Manickam:2014:ITM,
author = "Saravana Manickam and Mahesh K. Marina and Sofia
Pediaditaki and Maziar Nekovee",
title = "An Iterative and Truthful Multi-Unit Auction Scheme
for Coordinated Sharing of Spectrum White Spaces",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "8--11",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the use of dynamic auctions for
coordinating spectrum sharing among secondary users,
and propose an online multi-unit, iterative auction
mechanism called VERUM that is truthful and efficient
(the item is always won by the bidder who values it the
most). VERUM is an adaptation of the well-known
Ausubel clinching auction [1] to suit the dynamic
spectrum sharing context. As a use case for VERUM, we
consider TV white space (TVWS) spectrum sharing among
home networks, and compare VERUM with two existing
efficient and truthful multi-unit spectrum auction
schemes, VERITAS and SATYA. Our evaluations, using real
distributions of homes in a dense urban neighborhood in
London and realistic TVWS spectrum availability maps
for the UK, show that VERUM outperforms the other two
schemes in terms of revenue, spectrum utilization and
percentage of winning bidders.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sinha:2014:GMD,
author = "Abhinav Sinha and Achilleas Anastasopoulos",
title = "A General Mechanism Design Methodology for Social
Utility Maximization with Linear Constraints",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "12--15",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Social utility maximization refers to the process of
allocating resources in a way that maximizes the sum of
agents' utilities, under the system constraints. Such
allocation arises in several problems in the general
area of communications, including unicast (and
multicast multi-rate) service on the Internet, as well
as in applications with (local) public goods, such as
power allocation in wireless networks, spectrum
allocation, etc. Mechanisms that implement such
allocations in Nash equilibrium have also been studied
but either they do not possess the full implementation
property, or are given in a case-by-case fashion, thus
obscuring fundamental understanding of these problems.
In this paper we propose a unified methodology for
creating mechanisms that fully implement, in Nash
equilibria, social utility maximizing functions arising
in various contexts where the constraints are convex.
Two additional design goals are the focus of this
paper: (a) the size of the message space scaling
linearly with the number of agents (even if agents'
types are entire valuation functions), (b) allocation
being feasible on and off equilibrium.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weber:2014:FAS,
author = "Steven Weber and Roch Gu{\'e}rin",
title = "Facilitating Adoption of Services with Positive
Externalities via Subsidies",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "16--19",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The paper investigates adoption of network services
whose value incorporates three key features, namely,
heterogeneity in user service affinity, a positive
externality, and a cost. Positive externalities often
result in a ``chicken and egg'' problem where early
adopters can see a cost that exceeds the service's
(low) initial value. In this paper we study subsidies
as a means to ``reach the knee'' and push adoption
higher (from zero to one). We focus on the simplest of
subsidies, namely, a fixed subsidy over a given period
of time, and are able to obtain expressions for
quantities of natural interest, e.g., the minimum
subsidy required, the minimum subsidy duration, and the
total subsidy cost. Interestingly, the expressions
reveal conditions under which the optimal subsidy is
neither the lowest nor applied for the shortest
duration. The findings help develop guidelines for
effective subsidies to promote the adoption of network
services.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ajorlou:2014:SID,
author = "Amir Ajorlou and Ali Jadbabaie and Ali Kakhbod",
title = "Strategic Information Diffusion: Spread vs. Exploit",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "20--23",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695540",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Frequent drops of prices to zero is a common
phenomenon in price trends of many smartphone
applications. The only means by which many of these
apps spread is the word of mouth of their users.
Motivated by these observations, we study the problem
of optimal dynamic pricing in a social network where
agents can only get informed about the product via word
of mouth from a friend who has already bought the
product. We show that for a durable product such as
many apps, the optimal policy should drop the price to
zero infinitely often, giving away the immediate profit
in full to expand the informed network in order to
exploit it in future. We further show that, beside the
word of mouth nature of the information diffusion, this
behavior crucially depends on the type of the product
being offered. For a nondurable product, although the
firm may initially make some free offers to expand its
network, after a finite period, it will fix the price
at a level that extracts the maximum profit from the
already informed population.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Acemoglu:2014:HIL,
author = "Daron Acemoglu and Giacomo Como and Fabio Fagnani and
Asuman Ozdaglar",
title = "Harmonic Influence in Large-scale Networks",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "24--24",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raja:2014:FFF,
author = "Vamseedhar Reddyvari Raja and Srinivas Shakkottai and
Amogh Dhamdhere and kc claffy",
title = "Fair, Flexible and Feasible {ISP} Billing",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "25--28",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The 95th percentile method for calculating a
customer's billable transit volume has been the
industry standard used by transit providers for over a
decade due to its simplicity. We recently showed [1]
that 95th percentile billing can be unfair, in that it
does not reflect a customer's contribution to the
provider's peak load. The 95th percentile method is
also inflexible, as it does not allow a provider to
offer incentives to customers that contribute minimally
to the provider's peak load. In this paper we propose a
new transit billing optimization framework that is
fair, flexible and computationally inexpensive. Our
approach is based on the Provision Ratio, a metric that
estimates the contribution of a customer to the
provider's peak traffic. The proposed mechanism has
fairness properties similar to the optimal (in terms of
fairness) Shapley value allocation, with a much smaller
computational complexity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
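The 95th-percentile billing rule summarized in the abstract above is
easy to make concrete. The Python sketch below is an illustration
only, not the authors' code: it bills on the 95th percentile of a
month of 5-minute traffic samples and computes a toy ``share of
traffic in the provider's peak slot'' metric in the spirit of the
Provision Ratio; the function names, the gamma traffic model, and
the exact peak-contribution definition are assumptions made for
illustration.

  # Illustrative sketch only; the Provision Ratio in the cited paper may differ.
  import numpy as np

  def percentile95_bill(samples_mbps, price_per_mbps):
      """Bill on the 95th percentile of a month of 5-minute traffic samples."""
      return np.percentile(samples_mbps, 95) * price_per_mbps

  def peak_contribution(customer_samples, aggregate_samples):
      """Toy metric: customer's share of traffic in the provider's peak slot."""
      peak_slot = int(np.argmax(aggregate_samples))
      return customer_samples[peak_slot] / aggregate_samples[peak_slot]

  # Example: 30 days of 5-minute samples (8640 slots), one bursty and one flat customer.
  rng = np.random.default_rng(0)
  bursty = rng.gamma(2.0, 5.0, 8640)
  flat = np.full(8640, 10.0)
  total = bursty + flat
  print(percentile95_bill(bursty, 1.0), peak_contribution(bursty, total))

Under such a rule, a flat customer and a bursty customer can receive
similar bills while contributing very differently to the provider's
peak, which is the unfairness the abstract refers to.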
@Article{Gyarmati:2014:APB,
author = "Laszlo Gyarmati and Nikolaos Laoutaris and Kostas
Sdrolias and Pablo Rodriguez and Costas Courcoubetis",
title = "From advertising profits to bandwidth prices: a
quantitative methodology for negotiating premium
peering",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "29--32",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Simhon:2014:ARG,
author = "Eran Simhon and David Starobinski",
title = "Advance Reservation Games and the Price of
Conservatism",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "33--33",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advance reservation (AR) services form a pillar of
many branches of the economy, e.g., transportation,
lodging, dining, and health care. There has also been
increased interest in applying AR in cloud computing
systems [1]. For instance, Moab Workload Manager [2]
and IBM Platform Computing Solutions [3] support AR. In
both of these packages, an administrator can decide
whether or not to enable AR and define an AR pricing
scheme. In most systems supporting AR, customers can
choose whether or not to make AR. Since the payoff of
each customer is affected by decisions of other
customers, it is natural to analyze the behavior of
such systems as strategic games. In this work, we study
a strategic non-cooperative game, referred to as an
advance reservation game. In this game, players
(customers) can reserve future resources in advance for
a fixed reservation fee C. We consider a slotted loss
system with N servers where customers are not flexible,
i.e., they leave the system if they cannot be served at
their desired time slots. Customers are not informed of
the state of the system (i.e., the number of unreserved
servers) prior to attempting a reservation. Thus, a
customer opting not to make a reservation lowers its
chance of finding a server available at the desired
time. The number of customers in each slot is an i.i.d.
Poisson random variable with parameter $ \lambda $ [4].
Customers have different lead times, where the lead
time of a customer is defined as the time elapsing
between its arrival and the slot starting time. Each
customer only knows its own lead time. However, all
lead times are derived from the same continuous
distribution known by both the provider and the
customers. In [5], we derive the equilibria structure
of AR games. We show that for any C > 0, only two types
of equilibria are possible. In the first type, none of
the customers, regardless of their lead times, makes AR
(none-make-AR equilibrium). In the second type, only
customers with lead time greater than some threshold
make AR (threshold equilibrium). Furthermore, we
establish the existence of three different ranges of
fees, such that if C falls in the first range only
threshold equilibria exist, in the second range both
threshold equilibria and a none-make-AR equilibrium
exist, and in the third range only a none-make-AR
equilibrium exists. In many cases, the fee C that
maximizes the provider's profit lies in the second
range. However, setting a fee in that range also
carries the risk of zero profit for the provider.
Therefore, in order to properly set the AR fee, the
provider should consider both the fee yielding the
maximum possible profit and the fee yielding the
maximum guaranteed profit. A guaranteed profit can only
be achieved using fees falling within the first range.
In this work, we introduce the concept of price
of conservatism (PoC), which corresponds to the ratio
of the maximum possible profit to the maximum
guaranteed profit, and analyze it in different regimes.
A greater PoC indicates greater potential profit loss
if the provider opts to be conservative. First, we
analyze a single-server regime, where we prove that for
any fee the equilibrium is unique (the second range
collapses in that case). Hence, $ \mathrm{PoC} = 1 $ and the
provider experiences no loss. Next, we analyze a
many-server regime where $ \lambda = \alpha N $ and $ N
\to \infty $. We distinguish between the cases of
overloaded and underloaded systems (i.e., $ \alpha > 1
$ and $ \alpha < 1 $ respectively). For the overloaded
case, we show that $ \mathrm{PoC} = \alpha / (\alpha - 1) $.
Hence, the price of conservatism increases in an
unbounded fashion as $ \alpha $ approaches one from
above. Finally, for the underloaded case, we show that
both the maximum and guaranteed profits converge to
zero.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
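As a quick worked example of the overloaded-regime result quoted in
the abstract above (the numbers are chosen arbitrarily for
illustration and are not from the paper): with $ \lambda = \alpha N $
and $ \alpha = 1.25 $, $ \mathrm{PoC} = \alpha / (\alpha - 1) =
1.25 / 0.25 = 5 $, so a provider that restricts itself to fees in the
first (guaranteed-profit) range forgoes up to four fifths of the
maximum possible profit; as $ \alpha \to 1^+ $ the ratio grows
without bound, and in the underloaded regime both profits vanish.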
@Article{Bentov:2014:PAE,
author = "Iddo Bentov and Charles Lee and Alex Mizrahi and Meni
Rosenfeld",
title = "Proof of Activity: Extending {Bitcoin}'s Proof of Work
via Proof of Stake [Extended Abstract]",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "34--37",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new protocol for a cryptocurrency, that
builds upon the Bitcoin protocol by combining its Proof
of Work component with a Proof of Stake type of system.
Our Proof of Activity protocol offers good security
against possibly practical attacks on Bitcoin, and has
a relatively low penalty in terms of network
communication and storage space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Acemoglu:2014:NSC,
author = "Daron Acemoglu and Azarakhsh Malekian and Asu
Ozdaglar",
title = "Network Security and Contagion",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "38--38",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper develops a theoretical model of investments
in security in a network of interconnected agents. The
network connections introduce the possibility of
cascading failures depending on exogenous or endogenous
attacks and the profile of security investments by the
agents. The general presumption in the literature,
based on intuitive arguments or analysis of symmetric
networks, is that because security investments create
positive externalities on other agents, there will be
underinvestment in security. We show that this
reasoning is incomplete because of a first-order
economic force: security investments are also strategic
substitutes. In a general (non-symmetric) network, this
implies that underinvestment by some agents will
encourage overinvestment by others. We demonstrate by
means of examples that not only will there be
overinvestment by some agents, but also that aggregate
probabilities of infection can be lower in equilibrium
than in the social optimum. We then provide sufficient
conditions for underinvestment. This requires both
sufficiently convex cost functions (just convexity is
not enough) and networks that are either symmetric or
locally tree-like (i.e., either trees or in the case of
stochastic networks, without local cycles with high
probability). We also characterize the impact of
network structure on equilibrium and optimal
investments. Finally, we show that when the attack
location is endogenized (by assuming that the attacker
chooses a probability distribution over the location of
the attack in order to maximize damage), there is
another reason for overinvestment: greater investment
by an agent shifts the attack to other parts of the
network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Roth:2014:DPT,
author = "Aaron Roth",
title = "Differential Privacy as a Tool for Mechanism Design in
Large Systems",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "39--39",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this talk we overview how differential privacy
gives a collection of tools that can be easily applied
to design algorithms which enjoy remarkable incentive
properties in large systems and markets --- settings in
which the number of interacting agents is large, and
each of the individual players is ``small''. We
illustrate this power with two vignettes --- designing
mediators to coordinate equilibrium behavior in games
of incomplete information (due to Kearns, Pai, Roth,
and Ullman, and Rogers and Roth, 2014), and designing
ascending price auctions such that sincere bidding is
an asymptotic dominant strategy (due to Huang, Hsu,
Roth, Roughgarden, and Wu, 2014). In both of these
settings, we get good incentive properties under the
assumption that the market is ``large'' in some sense.
However, we discuss how our methodology (via
differential privacy) allows us to make substantially
milder ``large market'' assumptions than those commonly
appearing in the literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Georgiadis:2014:DEC,
author = "Leonidas Georgiadis and George Iosifidis and Leandros
Tassiulas",
title = "Dynamic Exchange of Communication Services",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "40--40",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivation. The increasing mobile data demand and the
proliferation of advanced user-owned network equipment
have given rise to collaborative schemes, where users
satisfy each other's communication needs whenever they
have spare network resources [2]. A prerequisite for
the success of these models is to ensure that users
will share their resources in a fair fashion, and hence
will agree to cooperate. Ideally, from a system design
point of view, each user should receive resources (or,
service) commensurate to his contribution. When this is
not possible, e.g., due to asymmetries in resource
availability, we would prefer to have a
lexicographically optimal (or, lex-optimal) outcome
that balances the resource exchanges as much as
possible. Nevertheless, achieving such an allocation is
an intricate task since: (i) the service exchange is
constrained by an underlying graph that prescribes, for
each node, the subset of the nodes he can serve and
receive services from, (ii) each user takes servicing
decisions independently whenever he has idle network
resources, aiming to maximize the total service he
receives in exchange, (iii) each user is not aware of
the resource availability of other users, nor is he
aware of their current service allocation decisions
(towards the other nodes). In this totally dynamic,
fully decentralized and graph-constrained market
setting, the following question arises: how can the
lex-optimal allocation be achieved in an asynchronous
fashion by the users?..",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kazumori:2014:GDA,
author = "Eiichiro Kazumori",
title = "Generalizing Deferred Acceptance Auctions to Allow
Multiple Relinquishment Options",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "41--41",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695549",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{AlDaoud:2014:GUS,
author = "Ashraf {Al Daoud} and George Kesidis and J{\"o}rg
Liebeherr",
title = "A Game of Uncoordinated Sharing of Private Spectrum
Commons",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "42--42",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Poularakis:2014:QPQ,
author = "Konstantinos Poularakis and Ioannis Pefkianakis and
Jaideep Chandrashekar and Leandros Tassiulas",
title = "Quid Pro Quo: Reducing Peak Traffic Costs with
(Subscriber) Price Incentives",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "43--43",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivation: ISPs today are seeing unprecedented growth
in residential broadband traffic volumes driven by the
widespread popularity of video streaming services such
as Netflix, Hulu, Youtube and so on. This traffic
explosion creates a need for (expensive) periodic
network capacity upgrades and also increases transit
costs paid to upstream providers. As a response,
several ISPs have started to impose time-of-day data
caps, i.e., per-subscriber data usage limits defined
for particular periods of the day (e.g., BRC [USA] from
5pm--1am daily). While these have the effect of
reducing traffic in busy periods (and hence costs),
they also run the risk of alienating subscribers due to
their punitive nature. In this abstract, we propose an
alternative mechanism where the ISP has a simple fixed
monthly subscription price but also applies price
discounts to influence subscribers to shift their
traffic demands outside of the peak windows. Cost
Model: Most ISPs size the network for peak demand and
over-provision network capacity so that average
utilization stays below a threshold p. Capacity
upgrades are triggered by sustained, upward shifts in
the peak traffic (denoted $ P_{100} $). Hence, capacity
cost can be assumed to scale proportionally to a rate
factor $c$ (EUR/Mbps) and $ P_{100}$. The second cost
component, transit, is charged based on the 95th
percentile (denoted $ P_{95}$) of the transit traffic
(computed over 5-minute windows in a month) and a
committed price $r$ (EUR per Mbps). The
actual fraction of transit traffic ($f$) depends
greatly on several factors --- ISP size, geographic
market, etc. For example, $ f \approx 0.3$ in Japan,
and $ f \approx 0.8$ in Africa [1]. Utility Model:
Previous work has considered user utility to be a
quadratic function of the monthly data usage [2]. In
this abstract, we assume a general model where each
subscriber may have different utility functions at
different peak-time intervals. Hence, for a subscriber
$k$ that generates $ w_t $ amount of data (in bytes) in
interval $t$, the total utility is $ U_k(w) = \sum_t
\alpha_{kt} (b_{kt} w'_t - w'^2_t / 2) $ (1), where $
w'_t = \min \{ w_t, b_{kt} \} $ and $ \alpha_{kt} $, $
b_{kt} $ are constants. Incentive Mechanism: The exact
price discount is based on two values computed by the
ISP in each interval $t$: (i) a data threshold $ D^t
\geq 0 $ (in bytes) and (ii) a discount rate $ p^t \geq
0 $ (in EUR/byte). If the monthly data usage of $k$
inside $t$, $ d^{kt} $, is lower than $ D^t $, then $k$
gets a discount $ p^t (D^t - d^{kt}) $. Then, $k$ will
adapt her behavior so as to solve $ \max_{d^k \geq 0}
U^k(d^k) + \sum_t p^t (D^t - d^{kt}(D, p))^+ $ (2),
where $ (\cdot)^+ = \max (\cdot, 0) $ and $D$, $p$ and
$ d^k $ denote the corresponding vectors over all
intervals $t$. We assume that the ISP can estimate the
per-subscriber daily traffic pattern and predict how it
will change in response to the incentives by solving
the problem in (2). Then, the ISP will try to minimize
the incentive and traffic costs: $ \min_{D, p} \sum_t
(p^t \sum_k (D^t - d^{kt}(D, p))^+) + r f P_{95} + c
P_{100} $ (3), where $ P_{95} $, $ P_{100} $ are the
traffic percentiles after incentives. Approximation:
The above problem is NP-Hard in general. The difficulty
lies in the ordering in the objective function, which
renders it non-continuous. Interestingly, the slightly
different problem of minimizing the weighted sum of all
the percentile values from the 95th up to the 100th is
of lower complexity. Specifically, this problem can be
solved by replacing the percentile terms with new
optimization variables and adding a set of linear
constraints [3]. The ISP can alternatively solve this
relaxed problem to get a solution close to the optimal
one for the problem in (3). Evaluation: We applied the
price incentives to a dataset of 223 subscribers of a
European ISP measured in Dec. 2013. The ISP offers
discounts only during the peak period of 6--11pm, at
1-hour granularity. We set $ b_{kt} $ to the total data
usage of $k$ in $t$ in our dataset, and considered
utility to be equal to the tariff price to find $
\alpha_{kt} $. We find evidence of tangible benefits to
the ISP (up to 24\% cost reduction), while the
discounts varied from EUR 0.08--1.45 per subscriber.
Conclusion: The proposed mechanism does not mandate
changes in behavior, but instead offers a quid pro quo
to subscribers. In contrast to previous incentive
mechanisms [4], [5], our scheme requires no dynamic
traffic management and no additional communication
between the ISP and the subscribers. (Acknowledgments:
the implementation of the scientific publication is
co-financed through the Project ``State Scholarships
Foundation'' from the resources of the Operational
Program ``Education \& Lifelong Learning'', European
Social Fund (ESF) and the NSRF, 2007--2013. The first
author would like to acknowledge the ``Alexander S.
Onassis'' Public Benefit Foundation, Greece for
providing a scholarship.)",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
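The per-interval trade-off behind the incentive mechanism sketched in
the abstract above can be illustrated with a few lines of Python.
This is a toy illustration under assumed numbers, not the authors'
model or code: a subscriber with quadratic utility
$ u(d) = \alpha (b d - d^2 / 2) $ for $ 0 \leq d \leq b $, facing a
threshold $D$ and a discount rate $p$, picks the usage $d$ that
maximizes $ u(d) + p (D - d)^+ $.

  # Toy illustration of a subscriber's best response to a usage threshold D and
  # discount rate p; all names and numbers are assumptions, not from the paper.
  import numpy as np

  def best_usage(alpha, b, D, p, grid=10001):
      d = np.linspace(0.0, b, grid)                       # candidate usage levels
      payoff = alpha * (b * d - d**2 / 2) + p * np.maximum(D - d, 0.0)
      return d[int(np.argmax(payoff))]

  # Without a discount the subscriber would use d = b = 10; a large enough
  # discount rate pulls usage below the threshold D = 8.
  print(best_usage(alpha=1.0, b=10.0, D=8.0, p=5.0))      # roughly 5.0

Whether the discount actually shifts usage depends on how the
discount rate compares with the marginal utility near the threshold,
which is the kind of trade-off the ISP's optimization in (3)
balances.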
@Article{Lotfi:2014:NNI,
author = "Mohammad Hassan Lotfi and George Kesidis and Saswati
Sarkar",
title = "Network {Non-Neutrality} on the {Internet}: Content
Provision Under a Subscription Revenue Model",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "44--44",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Proceedings of the 2014 GreenMetrics Workshop",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joseph:2014:MFT,
author = "Siny Joseph and Vinod Namboodiri and Vishnu C. Dev",
title = "A Market-Driven Framework Towards Environmentally
Sustainable Mobile Computing",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "46--48",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Amid the plethora of initiatives and research
endeavors targeting the minimization of power and
energy consumption of information and communication
technologies (ICT), what has been largely missing is an
effort to reduce the energy consumption and electronic
waste generated by the rapidly growing segment of
mobile computing and communication devices. One
``green'' approach to meet both the goals of minimizing
life cycle energy consumption and reducing electronic
waste generation is that of increased device lifespan.
Increased device lifespans, however, are possible only
if the underlying market forces support such a paradigm
shift. This paper develops a market-driven framework
for mobile phone devices that helps understand the
reasons that affect a firm's decision to offer a green
choice for consumers (where ``green'' is defined as
devices with longer lifespan) and considers the
feasibility, possible benefits, and challenges in
increasing device lifespan.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jalali:2014:ECC,
author = "Fatemeh Jalali and Rob Ayre and Arun Vishwanath and
Kerry Hinton and Tansu Alpcan and Rod Tucker",
title = "Energy Consumption of Content Distribution from Nano
Data Centers versus Centralized Data Centers",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "49--54",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695555",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Energy consumption of nano data centers has recently
been a topic of interest as they emerge as a novel
computing and storage platform. We present end-to-end
energy consumption models for nano data centers and
their centralized counterpart. To assess the energy
consumption of nano and centralized data centers, we
propose flow-based and time-based energy consumption
models for shared and single user network equipment. To
evaluate our models, a set of measurements and
practical experiments are performed. Our results
indicate that nano data centers might lead to energy
savings depending on various factors such as location
of nano servers, type of access network attached to
nano servers, and the ratio of active time to idle time
of nano servers. Thus, nano data centers can complement
centralized ones and lead to energy savings if certain
applications are off-loadable from centralized data
centers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Miwa:2014:ECH,
author = "Shinobu Miwa and Charles R. Lefurgy",
title = "Evaluation of Core Hopping on {POWER7}",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "55--60",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Controlling and limiting the peak runtime temperature
of a microprocessor has the potential to reduce chip
leakage power consumption, improve chip reliability,
reduce provisioned cooling, and reduce operational
costs. Previously, the technique of core hopping has
been proposed as one method to reduce peak runtime
temperature. However, there is no study that shows core
hopping is beneficial for modern many-core
microprocessors found in high-end servers. This paper
thoroughly examines heat spreading in the 8-core POWER7
microprocessor which provides 5 temperature sensors per
core. We find that the POWER7 heatsink has excellent
heat spreading capabilities, which negate the benefits
of core hopping. We conclude that high-performance
servers with similar thermal solutions will also not
see benefit from core-hopping methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Debele:2014:ERS,
author = "Fikru Getachew Debele and Nanfang Li and Michela Meo
and Marco Ricca and Yi Zhang",
title = "Experimenting Resource-on-Demand Strategies for Green
{WLANs}",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "61--66",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "WLANs have become a necessary facility for flexible
Internet provisioning in enterprise networks. Usually
this connection provisioning is designed to handle a
peak load with a high density of Access Points (APs) to
guarantee a desired level of quality of service (QoS).
This design approach, however, leads to energy
inefficiency due to daily demand variability. In fact,
the off-peak period of the daily pattern is usually
quite long, and hence some resources consume power
without any beneficial activity. Resource-on-demand
(RoD) provisioning is among the foreseeable solutions
that satisfy both energy efficiency and QoS
constraints. That is, network capacity is dynamically
dimensioned to demand, while extra resources go into a
low-energy-consumption mode to save energy. In dense
WLAN scenarios, extra APs are turned off during low
load until required by an increase in demand. Some
theoretical analyses of RoD have been proposed in
related work, but experiments on real production
networks are still needed in order to investigate the
practical issues related to RoD strategies. This paper
addresses some of these issues through an experimental
activity on RoD strategies. Two strategies, namely
association-based and traffic-based policies, are
implemented in one part of the Politecnico di Torino
campus WLAN. Results from our testbed show that RoD
strategies are effective and that energy savings of up
to 70\% are possible.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yi:2014:MEC,
author = "Qing Yi and Suresh Singh",
title = "Minimizing Energy Consumption of {Fat-Tree} Data Center
Networks",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "67--72",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many data centers are built using a fat-tree network
topology because of its high bisection bandwidth.
Therefore there is a need to develop analytical models
for the energy behavior of fat-tree networks and
examine strategies to reduce energy consumption. The
most effective strategy is to power off entire
switches, if possible. In this paper, we derive
formulas for the minimum number of active switches
needed in a fat-tree data center network for arbitrary
types of loading. We also derive expressions for the
expected traffic loss when these networks are
overloaded with external (Internet) traffic. Results of
detailed simulations conducted using well-known traffic
models for data center networks [4] closely match our
derived formulas. We show that a fat-tree network
incurs significant energy cost even when very lightly
loaded. In order to further reduce energy consumption,
we consolidate traffic into fewer switches and derive
expressions for energy cost versus load assuming
traffic consolidation and show linear scaling. Finally,
we observe that traffic patterns have a significant
impact on energy consumption and this fact is evident
in the analytical formulas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ren:2014:FLC,
author = "Shaolei Ren and Mohammad A. Islam",
title = "A First Look at Colocation Demand Response",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "73--75",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695559",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Large data centers can participate in demand response
programs and receive financial benefits by reducing
energy consumption upon the utility's request. However, the
existing research has only considered demand response
by owner-operated data centers (e.g., Google), leaving
out another distinctly different yet integral part of
the data center industry --- multi-tenant colocation
data centers (a.k.a., colocation or ``colo''), where
the space is shared by multiple tenants for housing
self-owned servers. A major hurdle hindering colocation
demand response is ``split incentive'': the colocation
operator may desire demand response, but lacks control
over the tenants' servers; the tenants, on the other
hand, can reduce server energy consumption but may not
desire demand response unless they are properly
incentivized. In this paper, we present a
first-of-its-kind study on colocation demand response
and propose an incentive mechanism, called iCODE
(incentivizing COlocation tenants for DEmand response),
which breaks the split-incentive barrier for colocation
demand response. iCODE allows the tenants to
voluntarily bid for energy reduction when demand
response is needed and receive monetary rewards if
their bids are accepted. We formally model tenants'
bids and how the colocation operator decides the
winning bids to maximize total energy reduction without
profit loss. We demonstrate the potential of colocation
demand response by using a trace-based simulation to
show that iCODE can significantly reduce energy
consumption (e.g., up to over 50\%) during demand
response periods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cavdar:2014:QBS,
author = "Derya {\c{C}}avdar and Andrea Ros{\`a} and Lydia Y.
Chen and Walter Binder and Fatih Alag{\"o}z",
title = "Quantifying the Brown Side of Priority Schedulers:
Lessons from Big Clusters",
journal = j-SIGMETRICS,
volume = "42",
number = "3",
pages = "76--81",
month = dec,
year = "2014",
CODEN = "????",
DOI = "https://doi.org/10.1145/2695533.2695560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jan 7 14:34:59 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling is a central operation to achieve ``green''
data centers, i.e., distributing diversified workloads
across heterogeneous resources in an energy efficient
manner. Taking an opposite perspective from most of the
related work, this paper reveals the ``brown'' side of
scheduling, i.e., wasted core seconds (so called brown
resources), using field analysis and trace-driven
simulation of a Google cluster trace. First, based on
the trace, we pinpoint the dependency between priority
scheduling and task eviction that causes brown
resources and present a brief characterization study
focusing on workload priorities. Next, to better
understand and further reduce the resource
``inefficiency'' of priority scheduling, we develop a
slot-based scheduler and simulator with various system
tunable parameters. Our key finding is that tasks of
low priority suffer greatly in terms of response time
as well as CPU resources because of a high probability
of being evicted and resubmitted. We propose to use
simple threshold-based policies that consider the
trade-off between task drop rates and wasted core
seconds caused by task resubmission after eviction. Our
experimental results show that we are able to
effectively mitigate brown resources without
sacrificing the performance advantages of priority
scheduling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ardagna:2015:SIP,
author = "Danilo Ardagna and Mark S. Squillante",
title = "Special Issue on Performance and Resource Management
in Big Data Applications",
journal = j-SIGMETRICS,
volume = "42",
number = "4",
pages = "2--2",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2788402.2788404",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 3 16:05:37 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2015:ALA,
author = "Yue Tan and Cathy H. Xia",
title = "An Adaptive Learning Approach for Efficient Resource
Provisioning in Cloud Services",
journal = j-SIGMETRICS,
volume = "42",
number = "4",
pages = "3--11",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2788402.2788405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 3 16:05:37 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The emerging cloud computing service market aims at
delivering computing resources as a utility over the
Internet with high quality. It faces evolving, unknown
demand that is typically highly uncertain. Traditional
provisioning methods either make idealized assumptions
about the demand distribution or rely on extensive offline
statistical analysis of historical data. In this paper,
we present an online adaptive learning approach to
address the optimal resource provisioning problem.
Based on a stochastic loss model of the cloud services,
we formulate the provisioning problem from a revenue
management perspective, and present a stochastic
gradient-based learning algorithm that adaptively
adjusts the provisioning solution as observations of
the demand are continuously made. We show that our
adaptive learning algorithm guarantees optimality and
demonstrate through simulation that it can adapt
quickly to non-stationary demand.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
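The abstract above does not spell out the learning algorithm, so the
fragment below is only a generic illustration of the idea of
adjusting a provisioning level by stochastic gradient steps as demand
observations arrive; it uses an assumed, toy newsvendor-style revenue
model (price per served unit, cost per provisioned unit) rather than
the paper's stochastic loss model.

  # Generic online stochastic-gradient provisioning sketch (not the paper's
  # algorithm). profit(c) = price * min(demand, c) - cost * c, and each demand
  # observation gives the unbiased gradient estimate price * 1{demand > c} - cost,
  # so c drifts toward the (1 - cost/price) quantile of the demand distribution.
  import random

  def provision_online(demands, price=1.0, cost=0.3, step=0.5, c0=0.0):
      c = c0
      for d in demands:
          grad = price * (1.0 if d > c else 0.0) - cost   # stochastic gradient
          c = max(0.0, c + step * grad)                   # projected ascent step
      return c

  random.seed(0)
  demand_stream = [random.expovariate(1.0 / 100.0) for _ in range(20000)]
  print(provision_online(demand_stream))   # hovers near the 70th percentile (~120)

Because the step keeps being applied as new observations come in, the
provisioning level tracks slow changes in the demand distribution,
which is the non-stationary behavior the abstract emphasizes.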
@Article{Rosa:2015:DCE,
author = "Andrea Ros{\`a} and Lydia Y. Chen and Robert Birke and
Walter Binder",
title = "Demystifying Casualties of Evictions in Big Data
Priority Scheduling",
journal = j-SIGMETRICS,
volume = "42",
number = "4",
pages = "12--21",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2788402.2788406",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 3 16:05:37 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ever increasing size and complexity of large-scale
datacenters enhance the difficulty of developing
efficient scheduling policies for big data systems,
where priority scheduling is often employed to
guarantee the allocation of system resources to high
priority tasks, at the cost of task preemption and
resulting resource waste. A large number of related
studies focuses on understanding workloads and their
performance impact on such systems; nevertheless,
existing works pay little attention on evicted tasks,
their characteristics, and the resulting impairment on
the system performance. In this paper, we base our
analysis on Google cluster traces, where tasks can
experience three different types of unsuccessful
events, namely eviction, kill and fail. We particularly
focus on eviction events, i.e., preemption of task
execution due to higher priority tasks, and rigorously
quantify their performance drawbacks, in terms of
wasted machine time and resources, with particular
focus on priority. Motivated by the high dependency of
eviction on underlying scheduling policies, we also
study its statistical patterns and its dependency on
other types of unsuccessful events. Moreover, by
considering co-executed tasks and system load, we
deepen the knowledge on priority scheduling, showing
how priority and machine utilization affect the
eviction process and related tasks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ying:2015:EAE,
author = "Yijun Ying and Robert Birke and Cheng Wang and Lydia
Y. Chen and Gautam Natarajan",
title = "On Energy-aware Allocation and Execution for Batch and
Interactive {MapReduce}",
journal = j-SIGMETRICS,
volume = "42",
number = "4",
pages = "22--30",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2788402.2788407",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 3 16:05:37 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The energy-performance optimization of datacenters
becomes ever challenging, due to heterogeneous
workloads featuring different performance constraints.
In addition to conventional web service, MapReduce
presents another important workload class, whose
performance highly depends on data
availability/locality and shows different degrees of
delay sensitivities, such as batch vs. interactive
MapReduce. However, current energy optimization
solutions are mainly designed for a subset of these
workloads and their key features. Here, we present an
energy minimization framework, in particular, a concave
minimization problem, that specifically considers time
variability, data locality, and delay sensitivity of
web, batch-, and interactive-MapReduce. We aim to
maximize the usage of MapReduce servers by using their
spare capacity to run non-MapReduce workloads, while
controlling the workload delays through the execution
of MapReduce tasks, in particular batch ones. We
develop an optimal algorithm with complexity $O(T^2)$
in the case of perfect workload information, $T$ being
the length of the time horizon in number of control
windows, and derive the structure of optimal policy for
the case of uncertain workload information. Using
extensive simulation results, we show that the proposed
methodology can efficiently minimize the datacenter
energy cost while fulfilling the delay constraints of
workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2015:MRF,
author = "Jian Tan and Li Zhang and Min Li and Yandong Wang",
title = "Multi-resource Fair Sharing for Multiclass Workflows",
journal = j-SIGMETRICS,
volume = "42",
number = "4",
pages = "31--37",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2788402.2788408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 3 16:05:37 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multi-resource sharing for concurrent workflows
necessitates a fairness criterion to allocate multiple
resources to workflows with heterogeneous demands.
Recently, this problem has attracted increasing
attention and has been investigated by assuming that
each workflow has a single class of jobs and that each
class contains jobs of the same demand profile. The
demand profile of a class represents the required
multi-resources of a job. However, for typical
applications in cloud computing and distributed data
processing systems, a workflow usually needs to process
multiple classes of jobs. Relying on the concept of
slowdown, we characterize fairness for multi-resource
sharing and address scheduling for multiclass
workflows. We optimize the mixture of different classes
of jobs for a workflow as optimal operation points to
achieve the least slowdown, and discuss desirable
properties for these operation points. These studies
assume that the jobs are infinitely divisible. When
jobs are non-preemptive and indivisible, any fairness
criterion that relies only on the instantaneous
resource allocation cannot be strictly maintained at
every time point. To this end, we relax the instantaneous fairness
to an average metric within a time interval. This
relaxation introduces a time average to fairness and
allows occasional, but not too often, violations of
instantaneous fairness. In addition, it brings
flexibility and opportunities for further optimization
on resource utilization, e.g., using bin-packing,
within the constraint on fairness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2015:ECH,
author = "Zhuoyao Zhang and Ludmila Cherkasova and Boon Thau
Loo",
title = "Exploiting Cloud Heterogeneity to Optimize Performance
and Cost of {MapReduce} Processing",
journal = j-SIGMETRICS,
volume = "42",
number = "4",
pages = "38--50",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2788402.2788409",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 3 16:05:37 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cloud computing offers a new, attractive option to
customers for quickly provisioning any size Hadoop
cluster, consuming resources as a service, executing
their MapReduce workload, and then paying for the time
these resources were used. One of the open questions in
such environments is the right choice of resources (and
their amount) a user should lease from the service
provider. Typically, there is a variety of different
types of VM instances in the Cloud (e.g., small,
medium, or large EC2 instances). The capacity
differences of the offered VMs are reflected in VM's
pricing. Therefore, for the same price a user can get a
variety of Hadoop clusters based on different VM
instance types. We observe that the performance of
MapReduce applications may vary significantly on
different platforms. This makes a selection of the best
cost/performance platform for a given workload a
non-trivial problem, especially when it contains
multiple jobs with different platform preferences. We
aim to solve the following problem: given a completion
time target for a set of MapReduce jobs, determine a
homogeneous or heterogeneous Hadoop cluster
configuration (i.e., the number, types of VMs, and the
job schedule) for processing these jobs within a given
deadline while minimizing the rented infrastructure
cost. In this work, we design an efficient and fast
simulation-based framework for evaluating and selecting
the right underlying platform for achieving the
desirable Service Level Objectives (SLOs). Our
evaluation study with Amazon EC2 platform reveals that
for different workload mixes, an optimized platform
choice may result in 45--68\% cost savings for achieving
the same performance objectives when using different
(but seemingly equivalent) choices. Moreover, depending
on a workload the heterogeneous solution may outperform
the homogeneous cluster solution by 26--42\%. We
provide additional insights explaining the obtained
results by profiling the performance characteristics of
used applications and underlying EC2 platforms. The
results of our simulation study are validated through
experiments with Hadoop clusters deployed on different
Amazon EC2 instances.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Malekimajd:2015:OMR,
author = "Marzieh Malekimajd and Danilo Ardagna and Michele
Ciavotta and Alessandro Maria Rizzi and Mauro
Passacantando",
title = "Optimal Map Reduce Job Capacity Allocation in Cloud
Systems",
journal = j-SIGMETRICS,
volume = "42",
number = "4",
pages = "51--61",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2788402.2788410",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 3 16:05:37 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We are entering a Big Data world. Many sectors of our
economy are now guided by data-driven decision
processes. Big Data and business intelligence
applications are facilitated by the MapReduce
programming model while, at infrastructural layer,
cloud computing provides flexible and cost effective
solutions for allocating on demand large clusters.
Capacity allocation in such systems is a key challenge
to provide performance for MapReduce jobs and minimize
cloud resource costs. The contribution of this paper is
twofold: (i) we provide new upper and lower bounds for
MapReduce job execution time in shared Hadoop clusters,
(ii) we formulate a linear programming model able to
minimize cloud resource costs and job rejection
penalties for the execution of jobs of multiple classes
with (soft) deadline guarantees. Simulation results
show how the execution time of MapReduce jobs falls
within 14\% of our upper bound on average. Moreover,
numerical analyses demonstrate that our method is able
to determine the global optimal solution of the linear
problem for systems including up to 1,000 user classes
in less than 0.5 seconds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2015:MIM,
author = "Wei Zhang and Sundaresan Rajasekaran and Shaohua Duan
and Timothy Wood and Mingfa Zhu",
title = "Minimizing Interference and Maximizing Progress for
{Hadoop} Virtual Machines",
journal = j-SIGMETRICS,
volume = "42",
number = "4",
pages = "62--71",
month = mar,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2788402.2788411",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 3 16:05:37 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
abstract = "Virtualization promised to dramatically increase
server utilization levels, yet many data centers are
still only lightly loaded. In some ways, big data
applications are an ideal fit for using this residual
capacity to perform meaningful work, but the high level
of interference between interactive and batch
processing workloads currently prevents this from being
a practical solution in virtualized environments.
Further, the variable nature of spare capacity may make
it difficult to meet big data application deadlines. In
this work we propose two schedulers: one in the
virtualization layer designed to minimize interference
on high priority interactive services, and one in the
Hadoop framework that helps batch processing jobs meet
their own performance deadlines. Our approach uses
performance models to match Hadoop tasks to the servers
that will benefit them the most, and deadline-aware
scheduling to effectively order incoming jobs. We use
admission control to meet deadlines even when resources
are overloaded. The combination of these schedulers
allows data center administrators to safely mix
resource intensive Hadoop jobs with latency sensitive
web applications, and still achieve predictable
performance for both. We have implemented our system
using Xen and Hadoop, and our evaluation shows that our
schedulers allow a mixed cluster to reduce web response
times more than tenfold compared to the existing
Xen Credit Scheduler, while meeting more Hadoop
deadlines and lowering total task execution times by
6.5\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hajek:2015:BID,
author = "Bruce Hajek",
title = "Bounds Implied by Drift with Applications",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "1--1",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A recurring theme in the design of control schemes for
computer communication networks has been to identify
the drift of critical quantities such as queue lengths,
and then devise control strategies that close the loop.
Useful tools for the performance analysis of such
strategies are bounds on deviations from the expected
trajectory. This talk identifies an incomplete list of
such tools that have been used in a broad class of
applications, for both stochastic and deterministically
constrained models of load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2015:OAI,
author = "Xiaoxi Zhang and Zhiyi Huang and Chuan Wu and Zongpeng
Li and Francis C. M. Lau",
title = "Online Auctions in {IaaS} Clouds: Welfare and Profit
Maximization with Server Costs",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "3--15",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745855",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Auction design has recently been studied for dynamic
resource bundling and VM provisioning in IaaS clouds,
but is mostly restricted to the one-shot or offline
setting. This work targets a more realistic case of
online VM auction design, where: (i) cloud users bid
for resources into the future to assemble customized
VMs with desired occupation durations; (ii) the cloud
provider dynamically packs multiple types of resources
on heterogeneous physical machines (servers) into the
requested VMs; (iii) the operational costs of servers
are considered in resource allocation; (iv) both social
welfare and the cloud provider's net profit are to be
maximized over the system running span. We design
truthful, polynomial time auctions to achieve social
welfare maximization and/or the provider's profit
maximization with good competitive ratios. Our
mechanisms consist of two main modules: (1) an online
primal-dual optimization framework for VM allocation to
maximize the social welfare with server costs, and for
revealing the payments through the dual variables to
guarantee truthfulness; and (2) a randomized reduction
algorithm to convert the social welfare maximizing
auctions to ones that provide a maximal expected profit
for the provider, with competitive ratios comparable to
those for social welfare. We adopt a new application of
Fenchel duality in our primal-dual framework, which
provides richer structures for convex programs than the
commonly used Lagrangian duality, and our optimization
framework is general and expressive enough to handle
various convex server cost functions. The efficacy of
the online auctions is validated through careful
theoretical analysis and trace-driven simulation
studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yun:2015:DPF,
author = "Se-Young Yun and Alexandre Proutiere",
title = "Distributed Proportional Fair Load Balancing in
Heterogeneous Systems",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "17--30",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745861",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of distributed load balancing
in heterogeneous parallel server systems, where the
service rate achieved by a user at a server depends on
both the user and the server. Such heterogeneity
typically arises in wireless networks (e.g., servers
may represent frequency bands, and the service rate of
a user varies across bands). We assume that each server
shares its capacity equally in time among the users
allocated to it. Users initially attach to an arbitrary
server, but at random instants of time, they probe the
load at a new server and migrate there if this improves
their service rate. The dynamics under this distributed
load balancing scheme, referred to as Random Local
Search (RLS), may be interpreted as those generated by
strategic players updating their strategy in a load
balancing game. In closed systems, where the user
population is fixed, we show that this game has pure
Nash Equilibria (NEs), and that these equilibria
get close to a Proportionally Fair (PF) allocation of
users to servers when the user population grows large.
We provide an anytime upper bound of the gap between
the allocation under RLS and the PF allocation. In open
systems, where users randomly enter the system and
leave upon service completion, we establish that the
RLS algorithm stabilizes the system whenever this is at
all possible under centralized load balancing schemes,
i.e., it is throughput-optimal. The proof of this
result relies on a novel Lyapunov analysis that
captures the dynamics due to both users' migration and
their arrivals and departures. To our knowledge, the
RLS algorithm constitutes the first fully distributed
and throughput-optimal load balancing scheme in
heterogeneous parallel server systems. We extend our
analysis to various scenarios, e.g. to cases where
users can be simultaneously served by several servers.
Finally we illustrate through numerical experiments the
efficiency of the RLS algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
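
A minimal Python sketch of the Random Local Search dynamics described in the
abstract above (each server time-shares its capacity equally; a user probes
one uniformly chosen server and migrates if its own rate strictly improves).
The population size, number of servers, and the rate matrix are illustrative
assumptions, not taken from the paper.

import math
import random

random.seed(1)

# Hypothetical closed system: rate[u][s] is the service rate user u would get
# at server s if it were alone there (user/server heterogeneity).
num_users, num_servers = 30, 4
rate = [[random.uniform(0.5, 2.0) for _ in range(num_servers)]
        for _ in range(num_users)]

# Users initially attach to arbitrary servers.
assign = [random.randrange(num_servers) for _ in range(num_users)]

def load(s):
    # Number of users attached to server s (capacity is equally time-shared).
    return sum(1 for a in assign if a == s)

def my_rate(u):
    # Current service rate of user u.
    return rate[u][assign[u]] / load(assign[u])

# Random Local Search: at random instants a user probes one random server and
# migrates there if that strictly improves its own service rate.
for _ in range(5000):
    u = random.randrange(num_users)
    probe = random.randrange(num_servers)
    if probe == assign[u]:
        continue
    if rate[u][probe] / (load(probe) + 1) > my_rate(u):
        assign[u] = probe

print("final loads:", [load(s) for s in range(num_servers)])
print("sum of log-rates (the proportional-fairness objective):",
      round(sum(math.log(my_rate(u)) for u in range(num_users)), 3))
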
@Article{Bonald:2015:MRF,
author = "Thomas Bonald and James Roberts",
title = "Multi-Resource Fairness: Objectives, Algorithms and
Performance",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "31--42",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Designing efficient and fair algorithms for sharing
multiple resources between heterogeneous demands is
becoming increasingly important. Applications include
compute clusters shared by multi-task jobs and routers
equipped with middleboxes shared by flows of different
types. We show that the currently preferred objective
of Dominant Resource Fairness (DRF) has a significantly
less favorable efficiency-fairness tradeoff than
alternatives like Proportional Fairness and our
proposal, Bottleneck Max Fairness. We propose practical
algorithms to realize these sharing objectives and
evaluate their performance under a stochastic demand
model. It is shown, in particular, that the
strategyproofness property that motivated the choice of
DRF for an assumed fixed set of jobs or flows is
largely irrelevant when demand is dynamic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
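
For readers who want the Dominant Resource Fairness baseline mentioned above
made concrete, here is a small sketch of the standard task-granular DRF
allocation of Ghodsi et al. (not an algorithm from this paper). The
capacities and per-task demands are the usual textbook example and are
purely illustrative.

# Progressive DRF filling: repeatedly grant one task to the user with the
# smallest dominant share, until no user's next task fits.
capacity = {"cpu": 9.0, "mem": 18.0}
demand = {                      # per-task demand vectors of two users
    "A": {"cpu": 1.0, "mem": 4.0},
    "B": {"cpu": 3.0, "mem": 1.0},
}

used = {r: 0.0 for r in capacity}
tasks = {u: 0 for u in demand}
saturated = set()

def dominant_share(u):
    # Largest share of any single resource that user u currently holds.
    return max(tasks[u] * demand[u][r] / capacity[r] for r in capacity)

while len(saturated) < len(demand):
    u = min((x for x in demand if x not in saturated), key=dominant_share)
    if all(used[r] + demand[u][r] <= capacity[r] for r in capacity):
        for r in capacity:
            used[r] += demand[u][r]
        tasks[u] += 1
    else:
        saturated.add(u)        # this user's next task no longer fits

print(tasks)                    # e.g. {'A': 3, 'B': 2} for this instance
print({u: round(dominant_share(u), 3) for u in demand})
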
@Article{Georgiadis:2015:ESN,
author = "Leonidas Georgiadis and George Iosifidis and Leandros
Tassiulas",
title = "Exchange of Services in Networks: Competition,
Cooperation, and Fairness",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "43--56",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745860",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Exchange of services and resources in, or over,
networks is nowadays attracting renewed interest.
However, despite the broad applicability and the
extensive study of such models, e.g., in the context of
P2P networks, many fundamental questions regarding
their properties and efficiency remain unanswered. We
consider such a service exchange model and analyze the
users' interactions under three different approaches.
First, we study a centrally designed service allocation
policy that yields the fair total service each user
should receive based on the service it offers to the
others. Accordingly, we consider a competitive market
where each user determines selfishly its allocation
policy so as to maximize the service it receives in
return, and a coalitional game model where users are
allowed to coordinate their policies. We prove that
there is a unique equilibrium exchange allocation for
both game theoretic formulations, which also coincides
with the central fair service allocation. Furthermore,
we characterize its properties in terms of the
coalitions that emerge and the equilibrium allocations,
and analyze its dependency on the underlying network
graph. That servicing policy is the natural reference
point to the various mechanisms that are currently
proposed to incentivize user participation and improve
the efficiency of such networked service (or, resource)
exchange markets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aalto:2015:WIA,
author = "Samuli Aalto and Pasi Lassila and Prajwal Osti",
title = "{Whittle} Index Approach to Size-aware Scheduling with
Time-varying Channels",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "57--69",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745851",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the optimal opportunistic scheduling
problem for downlink data traffic in a wireless cell
with time-varying channels. The scheduler itself
operates in a very fast timescale of milliseconds, but
the objective function is related to minimizing the
holding costs in a much longer timescale, at the
so-called flow level. The Whittle index approach is a
powerful tool in this context, since it renders the
flow level optimization problem with heterogeneous
users tractable. Until now, this approach has been
applied to the opportunistic scheduling problem to
generate non-anticipating index policies that may
depend on the amount of attained service but do not
utilize the exact size information. In this paper, we
produce a size-aware (i.e., anticipating) index policy
by applying the Whittle index approach in a novel way.
By a numerical study based on simulations, we
demonstrate that the resulting size-aware index policy
systematically improves performance. As a side result,
we show that the opportunistic scheduling problem is
indexable when the file sizes follow the Pascal
distribution, and we derive the corresponding Whittle
index, which generalizes earlier results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sur:2015:GIN,
author = "Sanjib Sur and Vignesh Venkateswaran and Xinyu Zhang
and Parmesh Ramanathan",
title = "{60 GHz} Indoor Networking through Flexible Beams: a
Link-Level Profiling",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "71--84",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745858",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "60 GHz technology holds tremendous potential to
upgrade wireless link throughput to Gbps level. To
overcome inherent vulnerability to attenuation, 60 GHz
radios communicate by forming highly-directional
electronically-steerable beams. Standards like IEEE
802.11ad have tailored MAC/PHY protocols to such
flexible-beam 60 GHz networks. However, lack of a
reconfigurable platform has thwarted a realistic
proof-of-concept evaluation. In this paper, we conduct
an in-depth measurement of indoor 60 GHz networks using
a first-of-its-kind software-radio platform. Our
measurement focuses on the link-level behavior with
three major perspectives: (i) coverage and bit-rate of
a single link, and implications for 60 GHz MIMO; (ii)
impact of beam-steering on network performance,
particularly under human blockage and device mobility;
(iii) spatial reuse between flexible beams. Our study
dispels some common myths, and reveals key challenges
in maintaining robust flexible-beam connection. We
propose new principles that can tackle such challenges
based on unique properties of 60 GHz channel and
cognitive capability of 60 GHz links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2015:SDP,
author = "Liang Zhang and Weijie Wu and Dan Wang",
title = "Sponsored Data Plan: a Two-Class Service Model in
Wireless Data Networks",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "85--96",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745863",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Data traffic demand over the Internet is increasing
rapidly, and it is changing the pricing model between
Internet service providers (ISPs), content providers
(CPs) and end users. One recent pricing proposal is the
sponsored data plan: when accessing content from a
particular CP, end users do not need to pay for the
volume of traffic consumed; instead, the CP sponsors
this data consumption. In this paper, our goal is to
understand the rationale behind this new pricing model,
as well as its impact on the wireless data market, in
particular, who will benefit and who will be hurt by
this scheme. We build a two-class service model to
analyze the consumers' traffic demand under the
sponsored data plan with consideration of QoS. We use a
two-stage Stackelberg game to characterize the
interaction between CPs and the ISP and reveal a number
of important findings. Our conclusions include: (1)
When the ISP's capacity is sufficient, the sponsored
data plan benefits consumers and CPs in the short run,
but the ISP does not have incentives to further improve
its service in the long run. (2) When the ISP's capacity
is insufficient, the ISP and end users may achieve a
win-win trade, while the ISP and CPs always compete for
the revenue. (3) The sponsored data plan may enlarge the
imbalance in revenue distribution between different
CPs; CPs with higher unit income and poorer technology
support are more likely to prefer the sponsored data
plan.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2015:QPR,
author = "Bin Li and Rayadurgam Srikant",
title = "Queue-Proportional Rate Allocation with Per-Link
Information in Multihop Networks",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "97--108",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745864",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The backpressure scheduling algorithm for multihop
wireless networks is known to be throughput optimal,
but it requires each node to maintain per-destination
queues. Recently, a clever generalization of processor
sharing has been proposed which is also throughput
optimal, but which only uses per-link queues. Here we
propose another algorithm called Queue Proportional
Rate Allocation (QPRA) which also only uses per-link
queues, and allocates service rates to links in
proportion to their queue-lengths and employs the
Serve-In-Random-Order (SIRO) discipline within each
link. Through fluid limit techniques and using a novel
Lyapunov function, we show that the QPRA achieves the
maximum throughput. We demonstrate an advantage of QPRA
by showing that, for the so-called primary interference
model, it is able to develop a low-complexity
scheduling scheme which approximates QPRA and achieves
a constant fraction of the maximum throughput region,
independent of network size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
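
A toy Python sketch of the queue-proportional idea above under the simplest
possible interference model: all links share a fixed number of service
opportunities per slot, each opportunity is given to a link with probability
proportional to its per-link queue length, and packets within a link are
served in random order (SIRO). The single-resource model, arrival rates and
capacity are illustrative assumptions and do not reproduce the paper's
general multihop setting or its fluid-limit analysis.

import numpy as np

rng = np.random.default_rng(0)

NUM_LINKS, C, SLOTS = 3, 6, 20000
lam = np.array([1.2, 2.0, 1.8])          # per-link Poisson arrival rates, sum < C

queues = [[] for _ in range(NUM_LINKS)]  # store the arrival slot of each packet
delays = []

for t in range(SLOTS):
    for l in range(NUM_LINKS):
        queues[l].extend([t] * int(rng.poisson(lam[l])))
    for _ in range(C):
        lengths = np.array([len(q) for q in queues], dtype=float)
        total = lengths.sum()
        if total == 0:
            break
        l = rng.choice(NUM_LINKS, p=lengths / total)   # queue-proportional rates
        i = rng.integers(len(queues[l]))               # SIRO within the link
        delays.append(t - queues[l].pop(i))

print("mean packet delay (slots):", sum(delays) / len(delays))
print("final queue lengths:", [len(q) for q in queues])
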
@Article{Marasevic:2015:RAR,
author = "Jelena Marasevic and Jin Zhou and Harish Krishnaswamy
and Yuan Zhong and Gil Zussman",
title = "Resource Allocation and Rate Gains in Practical
Full-Duplex Systems",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "109--122",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745872",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Full-duplex communication has the potential to
substantially increase the throughput in wireless
networks. However, the benefits of full-duplex are
still not well understood. In this paper, we
characterize the full-duplex rate gains in both
single-channel and multi-channel use cases. For the
single-channel case, we quantify the rate gain as a
function of the remaining self-interference and SNR
values. We also provide a sufficient condition under
which the sum of uplink and downlink rates on a
full-duplex channel is concave in the transmission
power levels. Building on these results, we consider
the multi-channel case. For that case, we introduce a
new realistic model of a small form-factor (e.g.,
smartphone) full-duplex receiver and demonstrate its
accuracy via measurements. We study the problem of
jointly allocating power levels to different channels
and selecting the frequency of maximum
self-interference suppression, where the objective is
maximizing the sum of the rates over uplink and
downlink OFDM channels. We develop a polynomial time
algorithm which is nearly optimal under very mild
restrictions. To reduce the running time, we develop an
efficient nearly-optimal algorithm under the high SINR
approximation. Finally, we demonstrate via numerical
evaluations the capacity gains in the different use
cases and obtain insights into the impact of the
remaining self-interference and wireless channel states
on the performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2015:TSS,
author = "Nicolas Gast and Benny {Van Houdt}",
title = "Transient and Steady-state Regime of a Family of
List-based Cache Replacement Algorithms",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "123--136",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study the performance of a family of
cache replacement algorithms. The cache is decomposed
into lists. An item enters the cache via the first list
and jumps to the next list whenever a hit on it occurs.
The
classical policies FIFO, RANDOM, CLIMB and its hybrids
are obtained as special cases. We present explicit
expressions for the cache content distribution and miss
probability under the IRM model. We develop an
algorithm with a time complexity that is polynomial in
the cache size and linear in the number of items to
compute the exact miss probability. We introduce lower
and upper bounds on the latter that can be computed in
a time that is linear in the cache size times the
number of items. We further introduce a mean field
model to approximate the transient behavior of the miss
probability and prove that this model becomes exact as
the cache size and number of items tend to infinity.
We show that the set of ODEs associated to the mean
field model has a unique fixed point that can be used
to approximate the miss probability in case the exact
computation becomes too time consuming. Using this
approximation, we provide guidelines on how to select a
replacement algorithm within the family considered such
that a good trade-off is achieved between the cache
reactivity and its steady-state hit probability. We
simulate these cache replacement algorithms on traces
of real data and show that they can outperform LRU.
Finally, we also disprove the well-known conjecture
that the CLIMB algorithm is the optimal finite-memory
replacement algorithm under the IRM model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
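
A small illustrative implementation of one member of the list-based family
described above, under the reading that a hit promotes the item by swapping
it with a victim in the next list, while a miss inserts the item over a
victim in the first list. The victim rule (uniformly random), the list
sizes, and the Zipf-like request stream are illustrative assumptions; the
exact family, its IRM analysis and the mean-field results are in the paper.

import random

random.seed(0)

LIST_SIZES = [4, 4, 4]            # total cache size 12, split into three lists
lists = [[None] * s for s in LIST_SIZES]

def access(item):
    # Return True on a hit and apply the list-based promotion rule.
    for i, lst in enumerate(lists):
        if item in lst:
            if i + 1 < len(lists):                      # promote by swapping
                j = random.randrange(len(lists[i + 1])) # with a random victim
                k = lst.index(item)
                lst[k], lists[i + 1][j] = lists[i + 1][j], item
            return True
    # Miss: the item replaces a random victim in the first list.
    lists[0][random.randrange(len(lists[0]))] = item
    return False

# Zipf-like independent reference model over 200 items.
items = list(range(200))
weights = [1.0 / (r + 1) for r in range(len(items))]
hits = sum(access(random.choices(items, weights)[0]) for _ in range(50000))
print("hit ratio:", hits / 50000)
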
@Article{Kandemir:2015:MRR,
author = "Mahmut Kandemir and Hui Zhao and Xulong Tang and
Mustafa Karakoy",
title = "Memory Row Reuse Distance and its Role in Optimizing
Application Performance",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "137--149",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Continuously increasing dataset sizes of large-scale
applications overwhelm on-chip cache capacities and
make the performance of last-level caches (LLC)
increasingly important. That is, in addition to
maximizing LLC hit rates, it is becoming equally
important to reduce LLC miss latencies. One of the
critical factors that influence LLC miss latencies is
row-buffer locality (i.e., the fraction of LLC misses
that hit in the large buffer attached to a memory
bank). While there has been a plethora of recent works
on optimizing row-buffer performance, to our knowledge,
there is no study that quantifies the full potential of
row-buffer locality and impact of maximizing it on
application performance. Focusing on multithreaded
applications, the first contribution of this paper is
the definition of a new metric called (memory) row
reuse distance (RRD). We show that, while intra-core
RRDs are relatively small (increasing the chances for
row-buffer hits), inter-core RRDs are quite large
(increasing the chances for row-buffer misses).
Motivated by this, we propose two schemes that measure
the maximum potential benefits that could be obtained
from minimizing RRDs, to the extent allowed by program
dependencies. Specifically, one of our schemes
(Scheme-I) targets only intra-core RRDs, whereas the
other one (Scheme-II) aims at reducing both intra-core
RRDs and inter-core RRDs. Our experimental evaluations
demonstrate that (i) Scheme-I reduces intra-core RRDs
but increases inter-core RRDs; (ii) Scheme-II reduces
inter-core RRDs significantly while achieving a similar
behavior to Scheme-I as far as intra-core RRDs are
concerned; (iii) Scheme-I and Scheme-II improve
execution times of our applications by 17\% and 21\%,
respectively, on average; and (iv) both our schemes
deliver consistently good results under different
memory request scheduling policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2015:SED,
author = "Xiaomeng Chen and Ning Ding and Abhilash Jindal and Y.
Charlie Hu and Maruti Gupta and Rath Vannithamby",
title = "{Smartphone} Energy Drain in the Wild: Analysis and
Implications",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "151--164",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745875",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The limited battery life of modern smartphones remains
a leading factor adversely affecting the mobile
experience of millions of smartphone users. In order to
extend battery life, it is critical to understand where
and how energy drain is happening on users' phones
under normal usage, for example, in a one-day cycle. In
this paper, we conduct the first extensive measurement
and modeling of energy drain of 1520 smartphones in the
wild. We make two primary contributions. First, we
develop a hybrid power model that integrates
utilization-based models and FSM-based models for
different phone components with a novel technique that
estimates the triggers for the FSM-based network power
model based on network utilization. Second, through
analyzing traces collected on 1520 Galaxy S3 and S4
devices in the wild, we present detailed analysis of
where the CPU time and energy are spent across the 1520
devices, inside the 800 apps, as well as along several
evolution dimensions, including hardware, Android,
cellular, and app updates. Our findings of smartphone
energy drain in the wild have significant implications
to the various key players of the Android phone
ecosystem, including phone vendors such as Samsung, Android
developers, app developers, and ultimately millions of
smartphone users, towards the common goal of extending
smartphone battery life and improving the user mobile
experience.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2015:NSB,
author = "Ming Chen and Dean Hildebrand and Geoff Kuenning and
Soujanya Shankaranarayana and Bharat Singh and Erez
Zadok",
title = "Newer Is Sometimes Better: an Evaluation of
{NFSv4.1}",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "165--176",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The popular Network File System (NFS) protocol is 30
years old. The latest version, NFSv4, is more than ten
years old but has only recently gained stability and
acceptance. NFSv4 is vastly different from its
predecessors: it offers a stateful server, strong
security, scalability/WAN features, and callbacks,
among other things. Yet NFSv4's efficacy and ability to
meet its stated design goals had not been thoroughly
studied until now. This paper compares NFSv4.1's
performance with NFSv3 using a wide range of micro- and
macro-benchmarks on a testbed configured to exercise
the core protocol features. We (1) tested NFSv4's
unique features, such as delegations and statefulness;
(2) evaluated performance comprehensively with
different numbers of threads and clients, and different
network latencies and TCP/IP features; (3) found,
fixed, and reported several problems in Linux's NFSv4.1
implementation, which helped improve performance by up
to 11X; and (4) discovered, analyzed, and explained
several counter-intuitive results. Depending on the
workload, NFSv4.1 was up to 67\% slower than NFSv3 in
a low-latency network, but exceeded NFSv3's performance
by up to 2.9X in a high-latency environment. Moreover,
NFSv4.1 outperformed NFSv3 by up to 172X when
delegations were used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meza:2015:LSS,
author = "Justin Meza and Qiang Wu and Sanjay Kumar and Onur
Mutlu",
title = "A Large-Scale Study of Flash Memory Failures in the
Field",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "177--190",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745848",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Servers use flash memory based solid state drives
(SSDs) as a high-performance alternative to hard disk
drives to store persistent data. Unfortunately, recent
increases in flash density have also brought about
decreases in chip-level reliability. In a data center
environment, flash-based SSD failures can lead to
downtime and, in the worst case, data loss. As a
result, it is important to understand flash memory
reliability characteristics over flash lifetime in a
realistic production data center environment running
modern applications and system software. This paper
presents the first large-scale study of flash-based SSD
reliability in the field. We analyze data collected
across a majority of flash-based solid state drives at
Facebook data centers over nearly four years and many
millions of operational hours in order to understand
failure properties and trends of flash-based SSDs. Our
study considers a variety of SSD characteristics,
including: the amount of data written to and read from
flash chips; how data is mapped within the SSD address
space; the amount of data copied, erased, and discarded
by the flash controller; and flash board temperature
and bus power. Based on our field analysis of how flash
memory errors manifest when running modern workloads on
modern SSDs, this paper is the first to make several
major observations: (1) SSD failure rates do not
increase monotonically with flash chip wear; instead
they go through several distinct periods corresponding
to how failures emerge and are subsequently detected,
(2) the effects of read disturbance errors are not
prevalent in the field, (3) sparse logical data layout
across an SSD's physical address space (e.g.,
non-contiguous data), as measured by the amount of
metadata required to track logical address translations
stored in an SSD-internal DRAM buffer, can greatly
affect SSD failure rate, (4) higher temperatures lead
to higher failure rates, but techniques that throttle
SSD operation appear to greatly reduce the negative
reliability impact of higher temperatures, and (5) data
written by the operating system to flash-based SSDs
does not always accurately indicate the amount of wear
induced on flash cells due to optimizations in the SSD
controller and buffering employed in the system
software. We hope that the findings of this first
large-scale flash memory reliability study can inspire
others to develop other publicly-available analyses and
novel flash reliability solutions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2015:OCO,
author = "Niangjun Chen and Anish Agarwal and Adam Wierman and
Siddharth Barman and Lachlan L. H. Andrew",
title = "Online Convex Optimization Using Predictions",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "191--204",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745854",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Making use of predictions is a crucial, but
under-explored, area of online algorithms. This paper
studies a class of online optimization problems where
we have external noisy predictions available. We
propose a stochastic prediction error model that
generalizes prior models in the learning and stochastic
control communities, incorporates correlation among
prediction errors, and captures the fact that
predictions improve as time passes. We prove that
achieving sublinear regret and constant competitive
ratio for online algorithms requires the use of an
unbounded prediction window in adversarial settings,
but that under more realistic stochastic prediction
error models it is possible to use Averaging Fixed
Horizon Control (AFHC) to simultaneously achieve
sublinear regret and constant competitive ratio in
expectation using only a constant-sized prediction
window. Furthermore, we show that the performance of
AFHC is tightly concentrated around its mean.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
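
For context, Averaging Fixed Horizon Control is usually defined (in the
earlier receding-horizon literature this paper builds on; the notation below
is generic, not lifted from the paper) by running $w+1$ copies of Fixed
Horizon Control, where copy $k$ re-plans every $w+1$ slots at the offsets
$\Omega_k = \{k, k+(w+1), k+2(w+1), \ldots\}$, each time minimizing the
predicted cost over its next $w+1$ slots starting from its own previous
action, and then playing the average
$$ x_t^{\mathrm{AFHC}} = \frac{1}{w+1} \sum_{k=1}^{w+1} x_t^{\mathrm{FHC},(k)}. $$
Intuitively, averaging spreads the occasional bad re-planning instants of any
single copy evenly over time, which is why a constant-sized prediction window
can suffice under the stochastic prediction-error model studied above.
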
@Article{Lee:2015:RMC,
author = "Donghyeon Lee and Joonyoung Kim and Hyunmin Lee and
Kyomin Jung",
title = "Reliable Multiple-choice Iterative Algorithm for
Crowdsourcing Systems",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "205--216",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745871",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The appearance of web-based crowdsourcing systems
gives a promising solution to exploiting the wisdom of
crowds efficiently in a short time with a relatively
low budget. Despite their efficiency, crowdsourcing
systems have an inherent problem in that responses from
workers can be unreliable since workers are low-paid
and have low responsibility. Although simple majority
voting can be a solution, various research studies have
sought to aggregate noisy responses to obtain greater
reliability in results through effective techniques
such as Expectation-Maximization (EM) based algorithms.
While EM-based algorithms get the limelight in
crowdsourcing systems due to their useful inference
techniques, Karger et al. made a significant
breakthrough by proposing a novel iterative algorithm
based on the idea of low-rank matrix approximations and
the message passing technique. They showed that the
performance of their iterative algorithm is
order-optimal, which outperforms majority voting and
EM-based algorithms. However, their algorithm is not
always applicable in practice since it can only be
applied to binary-choice questions. Recently, they
devised an inference algorithm for multi-class
labeling, which splits each task into a bunch of
binary-choice questions and exploits their existing
algorithm. However, it is difficult to integrate into
real crowdsourcing systems since it overexploits
redundancy: each split question must be queried
multiple times to obtain reliable results.
In this paper, we design an iterative algorithm to
infer true answers for multiple-choice questions, which
can be directly applied to real crowdsourcing systems.
Our algorithm can also be applicable to short-answer
questions as well. We analyze the performance of our
algorithm, and prove that the error bound decays
exponentially. Through extensive experiments, we verify
that our algorithm outperforms majority voting and
EM-based algorithm in accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2015:OLA,
author = "Yang Liu and Mingyan Liu",
title = "An Online Learning Approach to Improving the Quality
of Crowd-Sourcing",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "217--230",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745874",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a crowd-sourcing problem where in the
process of labeling massive datasets, multiple labelers
with unknown annotation quality must be selected to
perform the labeling task for each incoming data sample
or task, with the results aggregated using for example
simple or weighted majority voting rule. In this paper
we approach this labeler selection problem in an online
learning framework, whereby the quality of the labeling
outcome by a specific set of labelers is estimated so
that the learning algorithm over time learns to use the
most effective combinations of labelers. This type of
online learning in some sense falls under the family of
multi-armed bandit (MAB) problems, but with a distinct
feature not commonly seen: since the data is unlabeled
to begin with and the labelers' quality is unknown,
their labeling outcome (or reward in the MAB context)
cannot be directly verified; it can only be estimated
against the crowd and known probabilistically. We
design an efficient online algorithm LS\_OL using a
simple majority voting rule that can differentiate
high- and low-quality labelers over time, and is shown
to have a regret (w.r.t. always using the optimal set
of labelers) of $ O(\log^2 T)$ uniformly in time under
mild assumptions on the collective quality of the
crowd, thus regret free in the average sense. We
discuss performance improvement by using a more
sophisticated majority voting rule, and show how to
detect and filter out ``bad'' (dishonest, malicious or
very incompetent) labelers to further enhance the
quality of crowd-sourcing. Extension to the case when a
labeler's quality is task-type dependent is also
discussed using techniques from the literature on
continuous arms. We present numerical results using
both simulation and a real dataset on a set of images
labeled via Amazon Mechanical Turk (AMT).
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
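
A toy numerical illustration of the central difficulty noted above: labeler
quality can only be estimated against the crowd, because no ground truth is
available. The sketch estimates each labeler's agreement with the plain
majority vote and then re-aggregates with agreement-based weights; it is not
the paper's LS\_OL algorithm, and the accuracies, task counts and weighting
rule are made up.

import random

random.seed(0)

# Hypothetical crowd: each labeler answers a binary task correctly with its
# own (unknown) accuracy; only the labels are observed, never the truth.
NUM_LABELERS, NUM_TASKS = 9, 2000
accuracy = [0.9, 0.85, 0.8, 0.75, 0.7, 0.6, 0.55, 0.55, 0.5]

truth = [random.randint(0, 1) for _ in range(NUM_TASKS)]
labels = [[t if random.random() < accuracy[l] else 1 - t
           for l in range(NUM_LABELERS)] for t in truth]

def majority(votes):
    return int(2 * sum(votes) > len(votes))

# Estimate each labeler's quality by its agreement rate with the majority
# vote (the only reference signal available without ground truth).
agree = [0] * NUM_LABELERS
for row in labels:
    mv = majority(row)
    for l, v in enumerate(row):
        agree[l] += (v == mv)
est = [a / NUM_TASKS for a in agree]

# Re-aggregate with agreement-based weights: labelers near 0.5 count little.
def weighted_vote(row):
    score = sum((2 * v - 1) * (est[l] - 0.5) for l, v in enumerate(row))
    return int(score > 0)

plain = sum(majority(r) == t for r, t in zip(labels, truth)) / NUM_TASKS
weighted = sum(weighted_vote(r) == t for r, t in zip(labels, truth)) / NUM_TASKS
print("estimated agreement rates:", [round(e, 2) for e in est])
print("accuracy of plain majority:   ", plain)
print("accuracy of weighted majority:", weighted)
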
@Article{Combes:2015:LRR,
author = "Richard Combes and Stefan Magureanu and Alexandre
Proutiere and Cyrille Laroche",
title = "Learning to Rank: Regret Lower Bounds and Efficient
Algorithms",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "231--244",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745852",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Algorithms for learning to rank Web documents, display
ads, or other types of items constitute a fundamental
component of search engines and more generally of
online services. In such systems, when a user makes a
request or visits a web page, an ordered list of items
(e.g. documents or ads) is displayed; the user scans
this list in order, and clicks on the first relevant
item if any. When the user clicks on an item, the
reward collected by the system typically decreases with
the position of the item in the displayed list. The
main challenge in the design of sequential list
selection algorithms stems from the fact that the
probabilities with which the user clicks on the various
items are unknown and need to be learned. We formulate
the design of such algorithms as a stochastic bandit
optimization problem. This problem differs from the
classical bandit framework: (1) the type of feedback
received by the system depends on the actual relevance
of the various items in the displayed list (if the user
clicks on the last item, we know that none of the
previous items in the list are relevant); (2) there are
inherent correlations between the average relevance of
the items (e.g. the user may be interested in a
specific topic only). We assume that items are
categorized according to their topic and that users are
clustered, so that users of the same cluster are
interested in the same topic. We investigate several
scenarios depending on the available side-information
on the user before selecting the displayed list: (a) we
first treat the case where the topic the user is
interested in is known when she places a request; (b)
we then study the case where the user cluster is known
but the mapping between user clusters and topics is
unknown. For both scenarios, we derive regret lower
bounds and devise algorithms that approach these
fundamental limits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Combes:2015:BBR,
author = "Richard Combes and Chong Jiang and Rayadurgam
Srikant",
title = "Bandits with Budgets: Regret Lower Bounds and Optimal
Algorithms",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "245--257",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745847",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate multi-armed bandits with budgets, a
natural model for ad-display optimization encountered
in search engines. We provide asymptotic regret lower
bounds satisfied by any algorithm, and propose
algorithms which match those lower bounds. We consider
different types of budgets: scenarios where the
advertiser has a fixed budget over a time horizon, and
scenarios where the amount of money that is available
to spend is incremented in each time slot. Further, we
consider two different pricing models, one in which an
advertiser is charged for each time her ad is shown
(i.e., for each impression) and one in which the
advertiser is charged only if a user clicks on the ad.
For all of these cases, we show that it is possible to
achieve O(log(T)) regret. For both the
cost-per-impression and cost-per-click models, with a
fixed budget, we provide regret lower bounds that apply
to any uniformly good algorithm. Further, we show that
B-KL-UCB, a natural variant of KL-UCB, is
asymptotically optimal for these cases. Numerical
experiments (based on a real-world data set) further
suggest that B-KL-UCB also has the same or better
finite-time performance when compared to various
previously proposed (UCB-like) algorithms, which is
important when applying such algorithms to a real-world
problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
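
A short sketch of the KL-UCB index on which the B-KL-UCB policy above is
built. The index definition below follows the standard KL-UCB of Garivier
and Cappe for Bernoulli rewards; the budget and cost handling that turns it
into B-KL-UCB is not reproduced here, and the exploration function log t is
just one common choice.

import math

def bern_kl(p, q):
    # KL divergence between Bernoulli(p) and Bernoulli(q).
    eps = 1e-12
    p = min(max(p, eps), 1 - eps)
    q = min(max(q, eps), 1 - eps)
    return p * math.log(p / q) + (1 - p) * math.log((1 - p) / (1 - q))

def klucb_index(p_hat, pulls, t):
    # Largest q with pulls * KL(p_hat, q) <= log(t), found by bisection.
    target = math.log(max(t, 2)) / pulls
    lo, hi = p_hat, 1.0
    for _ in range(50):
        mid = (lo + hi) / 2
        if bern_kl(p_hat, mid) <= target:
            lo = mid
        else:
            hi = mid
    return lo

# At each step the arm with the largest index would be pulled, e.g.:
#   arm = max(range(K), key=lambda a: klucb_index(mean[a], pulls[a], t))
print(klucb_index(0.5, 10, 100))    # index shrinks toward 0.5 as pulls grow
print(klucb_index(0.5, 1000, 100))
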
@Article{Chalermsook:2015:SNM,
author = "Parinya Chalermsook and Atish Das Sarma and Ashwin
Lall and Danupon Nanongkai",
title = "Social Network Monetization via Sponsored Viral
Marketing",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "259--270",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745853",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Viral marketing is a powerful tool for online
advertising and sales because it exploits the influence
people have on one another. While this marketing
technique has been beneficial for advertisers, it has
not been shown how the social network providers such as
Facebook and Twitter can benefit from it. In this
paper, we initiate the study of sponsored viral
marketing where a social network provider that has
complete knowledge of its network is hired by several
advertisers to provide viral marketing. Each advertiser
has its own advertising budget and a fixed amount they
are willing to pay for each user that adopts their
product or shares their ads. The goal of the social
network provider is to gain the most revenue from the
advertisers. Since the products or ads from different
advertisers may compete with each other in getting
users' attention, and advertisers pay differently per
share and have different budgets, it is very important
that the social network providers start the ``seeds''
of the viral marketing of each product at the right
places in order to gain the most benefit. We study both
when advertisers have limited and unlimited budgets. In
the unlimited budget setting, we give a tight
approximation algorithm for the above task: we present
a polynomial-time $ O(\log n)$-approximation algorithm
for maximizing the expected revenue, where n is the
number of nodes (i.e., users) in the social network,
and show that no polynomial-time $ O (\log^{1 -
\epsilon } n)$-approximation algorithm exists, unless
NP $ \subseteq $ DTIME($ n^{\mathrm{poly} \log n}$). In the
limited budget setting, we show that it is hopeless to
solve the problem (even approximately): unless P = NP,
there is no polynomial-time $ O(n^{1 - \epsilon
})$-approximation algorithm. We perform experiments on
several data sets to compare our provable algorithms to
several heuristic baselines.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fanti:2015:SVS,
author = "Giulia Fanti and Peter Kairouz and Sewoong Oh and
Pramod Viswanath",
title = "Spy vs. Spy: Rumor Source Obfuscation",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "271--284",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Anonymous messaging platforms, such as Secret, Yik Yak
and Whisper, have emerged as important social media for
sharing one's thoughts without the fear of being judged
by friends, family, or the public. Further, such
anonymous platforms are crucial in nations with
authoritarian governments; the right to free expression
and sometimes the personal safety of the author of the
message depend on anonymity. Whether for fear of
judgment or personal endangerment, it is crucial to
keep anonymous the identity of the user who initially
posted a sensitive message. In this paper, we consider
an adversary who observes a snapshot of the spread of a
message at a certain time. Recent advances in rumor
source detection show that the existing messaging
protocols are vulnerable against such an adversary. We
introduce a novel messaging protocol, which we call
adaptive diffusion, and show that it spreads the
messages fast and achieves a perfect obfuscation of the
source when the underlying contact network is an
infinite regular tree: all users with the message are
nearly equally likely to have been the origin of the
message. Experiments on a sampled Facebook network show
that it effectively hides the location of the source
even when the graph is finite, irregular and has
cycles.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Massoulie:2015:GBT,
author = "Laurent Massouli{\'e} and Mesrob I. Ohannessian and
Alexandre Prouti{\`e}re",
title = "Greedy-{Bayes} for Targeted News Dissemination",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "285--296",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work addresses user targeting for news content
delivery. Specifically, we wish to disseminate a fresh
news content, whose topic is yet unknown, to all
interested users, while ``spamming'' a minimum number
of uninterested users. We formulate this as an online
stochastic optimization problem that extends in several
ways the classical multi-armed bandit problem. We
introduce Greedy-Bayes, a policy with appealing
robustness properties. We establish optimal scaling of
a suitably defined regret measure in various scenarios
of interest. To that end we develop an original proof
technique based on martingale concentration
inequalities. Numerical experiments show that
Greedy-Bayes improves upon Thompson sampling, the
state-of-the-art algorithm for bandit problems. Our
analysis further implies that low regret can only be
achieved if the assessment of content relevance for one
user leverages feedback from users with widely distinct
tastes. This impacts the design of efficient news
dissemination platforms: existing systems typically do
not leverage such negative feedback and could hence be
improved upon with adequate extensions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tarihi:2015:DAD,
author = "Mojtaba Tarihi and Hossein Asadi and Hamid
Sarbazi-Azad",
title = "{DiskAccel}: Accelerating Disk-Based Experiments by
Representative Sampling",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "297--308",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745856",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Disk traces are typically used to analyze real-life
workloads and for replay-based evaluations. This
approach benefits from capturing important details such
as varying behavior patterns, bursty activity, and
diurnal patterns of system activity, which are often
missing from the behavior of workload synthesis tools.
However, accurate capture of such details requires
recording traces containing long durations of system
activity, which are difficult to use for replay-based
evaluation. One way of solving the problem of long
storage trace duration is the use of disk simulators.
While publicly available disk simulators can greatly
accelerate experiments, they have not kept up with
technological innovations in the field. The variety,
complexity, and opaque nature of storage hardware make
it very difficult to implement accurate simulators. The
alternative, replaying the whole traces on real
hardware, suffers from either long run-time or required
manual reduction of experimental time, potentially at
the cost of reduced accuracy. On the other hand,
burstiness, auto-correlation, and complex
spatio-temporal properties of storage workloads make
the known methods of sampling workload traces less
effective. In this paper, we present a methodology
called DiskAccel to efficiently select key intervals of
a trace as representatives and to replay them to
estimate the response time of the whole workload. Our
methodology extracts a variety of spatial and temporal
features from each interval and uses efficient data
mining techniques to select the representative
intervals. To verify the proposed methodology, we have
implemented a tool capable of running whole traces or
selective intervals on real hardware, warming up
hardware state in an accelerated manner, and emulating
request causality while minimizing request
inter-arrival time error. Based on our experiments,
DiskAccel manages to speed up disk replay by more than
two orders of magnitude, while keeping average
estimation error at 7.6\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
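
A generic sketch of the representative-interval idea described above:
fixed-length trace intervals, per-interval feature vectors, clustering, then
replaying one representative per cluster weighted by cluster size. The
synthetic trace, the particular features, and the use of k-means are
illustrative assumptions; DiskAccel's actual feature set, data-mining
method, warm-up and causality handling are described in the paper.

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)

# Synthetic trace of (lba, size, write-flag) requests, cut into intervals.
trace = np.column_stack([
    rng.integers(0, 10**6, 100_000),        # logical block address
    rng.integers(1, 256, 100_000),          # request size
    rng.integers(0, 2, 100_000),            # write flag
])
INTERVAL = 1000
intervals = trace.reshape(-1, INTERVAL, 3)

def features(iv):
    lba, size, wr = iv[:, 0], iv[:, 1], iv[:, 2]
    jumps = np.abs(np.diff(lba))
    return [lba.mean(), lba.std(), jumps.mean(),      # spatial behavior
            size.mean(), wr.mean()]                   # intensity / read-write mix

X = np.array([features(iv) for iv in intervals])
X = (X - X.mean(axis=0)) / (X.std(axis=0) + 1e-9)     # normalize features

k = 8
km = KMeans(n_clusters=k, n_init=10, random_state=0).fit(X)
reps, weights = [], []
for c in range(k):
    members = np.flatnonzero(km.labels_ == c)
    centroid = km.cluster_centers_[c]
    reps.append(members[np.argmin(np.linalg.norm(X[members] - centroid, axis=1))])
    weights.append(len(members) / len(X))

print("intervals to replay:", reps)
print("weights for combining their measured response times:",
      [round(w, 3) for w in weights])
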
@Article{Jin:2015:CPI,
author = "Ye Jin and Xiaosong Ma and Mingliang Liu and Qing Liu
and Jeremy Logan and Norbert Podhorszki and Jong Youl
Choi and Scott Klasky",
title = "Combining Phase Identification and Statistic Modeling
for Automated Parallel Benchmark Generation",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "309--320",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745876",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel application benchmarks are indispensable for
evaluating/optimizing HPC software and hardware.
However, it is very challenging and costly to obtain
high-fidelity benchmarks reflecting the scale and
complexity of state-of-the-art parallel applications.
Hand-extracted synthetic benchmarks are time- and
labor-intensive to create. Real applications
themselves, while offering most accurate performance
evaluation, are expensive to compile, port,
reconfigure, and often plainly inaccessible due to
security or ownership concerns. This work contributes
APPrime, a novel tool for trace-based automatic
parallel benchmark generation. Taking as input standard
communication-I/O traces of an application's execution,
it couples accurate automatic phase identification with
statistical regeneration of event parameters to create
compact, portable, and to some degree reconfigurable
parallel application benchmarks. Experiments with four
NAS Parallel Benchmarks (NPB) and three real scientific
simulation codes confirm the fidelity of APPrime
benchmarks. They retain the original applications'
performance characteristics, in particular their
relative performance across platforms. Also, the resulting
benchmarks, already released online, are much more
compact and easy-to-port compared to the original
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xie:2015:PDC,
author = "Qiaomin Xie and Xiaobo Dong and Yi Lu and Rayadurgam
Srikant",
title = "Power of $d$ Choices for Large-Scale Bin Packing: a
Loss Model",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "321--334",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745849",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
abstract = "We consider a system of $N$ parallel servers, where
each server consists of B units of a resource. Jobs
arrive at this system according to a Poisson process,
and each job stays in the system for an exponentially
distributed amount of time. Each job may request
different units of the resource from the system. The
goal is to understand how to route arriving jobs to the
servers to minimize the probability that an arriving
job does not find the required amount of resource at
the server, i.e., the goal is to minimize blocking
probability. The motivation for this problem arises
from the design of cloud computing systems in which the
jobs are virtual machines (VMs) that request resources
such as memory from a large pool of servers. In this
paper, we consider power-of-$ d $-choices routing, where
a job is routed to the server with the largest amount
of available resource among $ d \geq 2$ randomly chosen
servers. We consider a fluid model that corresponds to
the limit as N goes to infinity and provide an explicit
upper bound for the equilibrium blocking probability.
We show that the upper bound exhibits different
behavior as B goes to infinity depending on the
relationship between the total traffic intensity
$ \lambda $ and $B$. In particular, if $ (B - \lambda) /
\sqrt {\lambda } \to \alpha $, the upper bound is
doubly exponential in $ \sqrt {\lambda }$ and if $ (B -
\lambda) / \log_d \lambda \to \beta $, $ \beta > 1$,
the upper bound is exponential in $ \lambda $.
Simulation results show that the blocking probability,
even for small B, exhibits qualitatively different
behavior in the two traffic regimes. This is in
contrast with the result for random routing, where the
blocking probability scales as $ O (1 / \sqrt \lambda)$
even if $ (B - \lambda) / \sqrt {\lambda } \to \alpha
$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
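
A small event-driven sketch of the routing rule described above: each
arriving job samples d servers uniformly at random, is placed on the sampled
server with the most free resource units, and is blocked if even that server
cannot fit it. The values of N, B, d, the job-size distribution, and the
load level are illustrative assumptions; the paper's contribution is the
fluid-limit bound on the blocking probability, not this simulation.

import heapq
import random

random.seed(0)

N, B, d = 200, 10, 2             # servers, units per server, sampled choices
MEAN_DEMAND = 2.0                # jobs request 1-3 units uniformly at random
lam = 0.7 * N * B / MEAN_DEMAND  # offered load about 70% of total capacity
ARRIVALS = 200_000

free = [B] * N                   # free units at each server
departures = []                  # heap of (departure_time, server, units)
t, blocked = 0.0, 0

for _ in range(ARRIVALS):
    t += random.expovariate(lam)                  # Poisson arrivals
    while departures and departures[0][0] <= t:   # release finished jobs
        _, s, units = heapq.heappop(departures)
        free[s] += units
    units = random.randint(1, 3)                  # this job's resource demand
    # Power-of-d: sample d servers, pick the one with the most free resource.
    s = max(random.sample(range(N), d), key=lambda i: free[i])
    if free[s] >= units:
        free[s] -= units
        heapq.heappush(departures, (t + random.expovariate(1.0), s, units))
    else:
        blocked += 1                              # job is lost (loss model)

print("empirical blocking probability:", blocked / ARRIVALS)
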
@Article{Rizk:2015:CBF,
author = "Amr Rizk and Felix Poloczek and Florin Ciucu",
title = "Computable Bounds in Fork-Join Queueing Systems",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "335--346",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745859",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a Fork-Join (FJ) queueing system an upstream fork
station splits incoming jobs into N tasks to be further
processed by N parallel servers, each with its own
queue; the response time of one job is determined, at a
downstream join station, by the maximum of the
corresponding tasks' response times. This queueing
system is useful to the modelling of multi-service
systems subject to synchronization constraints, such as
MapReduce clusters or multipath routing. Despite their
apparent simplicity, FJ systems are hard to analyze.
This paper provides the first computable stochastic
bounds on the waiting and response time distributions
in FJ systems. We consider four practical scenarios by
combining (1a) renewal and (1b) non-renewal arrivals,
and (2a) non-blocking and (2b) blocking servers. In the
case of non blocking servers we prove that delays scale
as $ O(\log N) $, a law which is known for first
moments under renewal input only. In the case of
blocking servers, we prove that the same factor of $
\log N $ dictates the stability region of the system.
Simulation results indicate that our bounds are tight,
especially at high utilizations, in all four scenarios.
A remarkable insight gained from our results is that,
at moderate to high utilizations, multipath routing
``makes sense'' from a queueing perspective for two paths
only, i.e., response times drop the most when $ N = 2
$; the technical explanation is that the resequencing
(delay) price starts to quickly dominate the tempting
gain due to multipath transmissions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gardner:2015:RLR,
author = "Kristen Gardner and Samuel Zbarsky and Sherwin Doroudi
and Mor Harchol-Balter and Esa Hyytia",
title = "Reducing Latency via Redundant Requests: Exact
Analysis",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "347--360",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745873",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent computer systems research has proposed using
redundant requests to reduce latency. The idea is to
run a request on multiple servers and wait for the
first completion (discarding all remaining copies of
the request). However there is no exact analysis of
systems with redundancy. This paper presents the first
exact analysis of systems with redundancy. We allow for
any number of classes of redundant requests, any number
of classes of non-redundant requests, any degree of
redundancy, and any number of heterogeneous servers. In
all cases we derive the limiting distribution on the
state of the system. In small (two or three server)
systems, we derive simple forms for the distribution of
response time of both the redundant classes and
non-redundant classes, and we quantify the ``gain'' to
redundant classes and ``pain'' to non-redundant classes
caused by redundancy. We find some surprising results.
First, the response time of a fully redundant class
follows a simple Exponential distribution and that of
the non-redundant class follows a Generalized
Hyperexponential. Second, fully redundant classes are
``immune'' to any pain caused by other classes becoming
redundant. We also compare redundancy with other
approaches for reducing latency, such as optimal
probabilistic splitting of a class among servers
(Opt-Split) and Join-the-Shortest-Queue (JSQ) routing
of a class. We find that, in many cases, redundancy
outperforms JSQ and Opt-Split with respect to overall
response time, making it an attractive solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Soltan:2015:JCP,
author = "Saleh Soltan and Mihalis Yannakakis and Gil Zussman",
title = "Joint Cyber and Physical Attacks on Power Grids: Graph
Theoretical Approaches for Information Recovery",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "361--374",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent events demonstrated the vulnerability of power
grids to cyber attacks and to physical attacks.
Therefore, we focus on joint cyber and physical attacks
and develop methods to retrieve the grid state
information following such an attack. We consider a
model in which an adversary attacks a zone by
physically disconnecting some of its power lines and
blocking the information flow from the zone to the
grid's control center. We use tools from linear algebra
and graph theory and leverage the properties of the
power flow DC approximation to develop methods for
information recovery. Using information observed
outside the attacked zone, these methods recover
information about the disconnected lines and the phase
angles at the buses. We identify sufficient conditions
on the zone structure and constraints on the attack
characteristics such that these methods can recover the
information. We also show that it is NP-hard to find an
approximate solution to the problem of partitioning the
power grid into the minimum number of attack-resilient
zones. However, since power grids can often be
represented by planar graphs, we develop a constant
approximation partitioning algorithm for these graphs.
Finally, we numerically study the relationships between
the grid's resilience and its structural properties,
and demonstrate the partitioning algorithm on real
power grids. The results can provide insights into the
design of a secure control network for the smart
grid.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:2015:IFH,
author = "Virag Shah and Gustavo de Veciana",
title = "Impact of Fairness and Heterogeneity on Delays in
Large-scale Content Delivery Networks",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "375--387",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745857",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider multi-class queueing systems where the per
class service rates depend on the network state,
fairness criterion, and is constrained to be in a
symmetric polymatroid capacity region. We develop new
comparison results leading to explicit bounds on the
mean service time under various fairness criteria and
possibly heterogeneous loads. We then study large-scale
systems with growing numbers of service classes n
(e.g., files), heterogeneous servers m and polymatroid
capacity resulting from a random bipartite graph
modeling service availability (e.g., placement of files
across servers). This models, for example, a large
scale content delivery network (CDN) supporting
parallel servicing of a download request. For an
appropriate asymptotic regime, we show that the
system's capacity region is uniformly close to a
symmetric polymatroid --- i.e., heterogeneity in
servers' capacity and file placement disappears.
Combining our comparison results and the asymptotic
'symmetry' in large systems, we study performance
robustness to heterogeneity in per class loads and
fairness criteria. Roughly, if each class can be served
by $ c_n = \omega (\log n) $ servers, the load per
class does not exceed $ \theta_n = o(\min (n / \log n,
c_n)) $, and average server utilization is bounded by $
\lambda < 1 $, then mean delay satisfies the following
bound: $ E[D^{(n)}] \leq K (\theta_n / c_n) (1 / \lambda)
\log (1 / (1 - \lambda)) $, where $K$ is a constant. Thus,
large, randomly configured CDNs with a logarithmic
number of file copies are robust to substantial load
and server heterogeneities for a class of fairness
criteria.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{He:2015:FIB,
author = "Ting He and Chang Liu and Ananthram Swami and Don
Towsley and Theodoros Salonidis and Andrei Iu. Bejan
and Paul Yu",
title = "{Fisher} Information-based Experiment Design for
Network Tomography",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "389--402",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745862",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network tomography aims to infer the individual
performance of networked elements (e.g., links) using
aggregate measurements on end-to-end paths. Previous
work on network tomography focuses primarily on
developing estimators using the given measurements,
while the design of measurements is often neglected. We
fill this gap by proposing a framework to design
probing experiments with a focus on probe allocation, and
applying it to two concrete problems: packet loss
tomography and packet delay variation (PDV) tomography.
Based on the Fisher Information Matrix (FIM), we design
the distribution of probes across paths to maximize the
best accuracy of unbiased estimators, asymptotically
achievable by the maximum likelihood estimator. We
consider two widely-adopted objective functions:
determinant of the inverse FIM (D-optimality) and trace
of the inverse FIM (A-optimality). We also extend the
A-optimal criterion to incorporate heterogeneity in
link weights. Under certain conditions on the FIM,
satisfied by both loss and PDV tomography, we derive
explicit expressions for both objective functions. When
the number of probing paths equals the number of links,
these lead to closed-form solutions for the optimal
design; when there are more paths, we develop a
heuristic to select a subset of paths and optimally
allocate probes within the subset. Observing the
dependency of the optimal design on unknown parameters,
we further propose an algorithm that iteratively
updates the design based on parameter estimates, which
converges to the design based on true parameters as the
number of probes increases. Using packet-level
simulations on real datasets, we verify that the
proposed design effectively reduces estimation error
compared with the common approach of uniformly
distributing probes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Birke:2015:WVM,
author = "Robert Birke and Mathias Bj{\"o}rkqvist and Cyriel
Minkenberg and Martin Schmatz and Lydia Y. Chen",
title = "When Virtual Meets Physical at the Edge: a Field Study
on Datacenters' Virtual Traffic",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "403--415",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The wide deployment of virtualization in datacenters
catalyzes the emergence of virtual traffic that
delivers the network demands between the physical
network and the virtual machines hosting clients'
services. Virtual traffic presents new opportunities
for reducing physical network demands, as well as
challenges of increasing management complexity. Given
the plethora of prior art on virtualization
technologies in datacenters, surprisingly little is
still known about such virtual traffic, and its
dependence on the physical network and virtual
machines. This paper provides a multi-faceted analysis
of the patterns and impacts of multiplexing the virtual
traffic onto the physical network, particularly from
the perspective of the network edge. We use a large
collection of field data from production datacenters
hosting a large number of diversified services from
multiple enterprise tenants. Our first focus is on
uncovering the temporal and spatial characteristics of
the virtual and physical traffic, i.e., network demand
growth and communication patterns, with special
attention paid to the traffic of migrating virtual
machines. The second focus is on characterizing the
effect of network multiplexing in terms of
communication locality, traffic load heterogeneity, and
the dependency on CPU processing power at the edges of
the network. Last but not least, we conduct a mirroring
analysis on service QoS, defined by the service
unavailability induced by network related issues, e.g.,
loads. We qualitatively and quantitatively discuss the
implications and opportunities that virtual traffic
presents for network capacity planning of virtualized
networks and datacenters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xiao:2015:HCV,
author = "Qingjun Xiao and Shigang Chen and Min Chen and Yibei
Ling",
title = "Hyper-Compact Virtual Estimators for Big Network Data
Based on Register Sharing",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "417--428",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745870",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cardinality estimation over big network data
consisting of numerous flows is a fundamental problem
with many practical applications. Traditionally the
research on this problem focused on using a small
amount of memory to estimate each flow's cardinality
from a large range (up to $ 10^9$). However, although
the memory needed for each flow has been greatly
compressed, when there is an extremely large number of
flows, the overall memory demand can still be very
high, exceeding the availability under some important
scenarios, such as implementing online measurement
modules in network processors using only on-chip cache
memory. In this paper, instead of allocating a
separate data structure (called an estimator) for each
flow, we take a different path by viewing all the flows
together as a whole: Each flow is allocated with a
virtual estimator, and these virtual estimators share a
common memory space. We discover that sharing at the
register (multi-bit) level is superior to sharing at
the bit level. We propose a framework of virtual
estimators that allows us to apply the idea of sharing
to an array of cardinality estimation solutions,
achieving far better memory efficiency than the best
existing work. Our experiment shows that the new
solution can work in a tight memory space of less than
1 bit per flow or even one tenth of a bit per flow ---
a quest that has never been realized before.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kotronis:2015:IPI,
author = "Vasileios Kotronis and Rowan Kl{\"o}ti and Matthias
Rost and Panagiotis Georgopoulos and Bernhard Ager and
Stefan Schmid and Xenofontas Dimitropoulos",
title = "Investigating the Potential of the Inter-{IXP}
Multigraph for the Provisioning of Guaranteed
End-to-End Services",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "429--430",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745877",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work, we propose utilizing the rich
connectivity between IXPs and ISPs for inter-domain
path stitching, supervised by centralized QoS brokers.
In this context, we highlight a novel abstraction of
the Internet topology, i.e., the inter-IXP multigraph
composed of IXPs and paths crossing the domains of
their shared member ISPs. This can potentially serve as
a dense Internet-wide substrate for provisioning
guaranteed end-to-end (e2e) services with high path
diversity and global IPv4 address space reach. We thus
map the IXP multigraph, evaluate its potential, and
introduce a rich algorithmic framework for path
stitching on such graph structures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singh:2015:MSA,
author = "Rahul Singh and Alexander Stolyar",
title = "{MaxWeight} Scheduling: Asymptotic Behavior of
Unscaled Queue-Differentials in Heavy Traffic",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "431--432",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745878",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The model is a ``generalized switch'', serving
multiple traffic flows in discrete time. The switch
uses MaxWeight algorithm to make a service decision
(scheduling choice) at each time step, which determines
the probability distribution of the amount of service
that will be provided. We are primarily motivated by
the following question: in the heavy traffic regime,
when the switch load approaches critical level, will
the service processes provided to each flow remain
``smooth'' (i.e., without large gaps in service)?
Addressing this question reduces to the analysis of the
asymptotic behavior of the unscaled queue-differential
process in heavy traffic. We prove that the stationary
regime of this process converges to that of a positive
recurrent Markov chain, whose structure we explicitly
describe. This in turn implies asymptotic
``smoothness'' of the service processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fuerst:2015:KTE,
author = "Carlo Fuerst and Stefan Schmid and Lalith Suresh and
Paolo Costa",
title = "Kraken: Towards Elastic Performance Guarantees in
Multi-tenant Data Centers",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "433--434",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745879",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is well-known that without strict network bandwidth
guarantees, application performance in multi-tenant
cloud environments is unpredictable. While recently
proposed systems support explicit bandwidth reservation
mechanisms, they require the resource schedules to be
announced ahead of time. We argue that this is not
practical in today's cloud environments, where
application demands are inherently unpredictable, e.g.,
due to stragglers. In this paper, we present KRAKEN, a
system that allows tenants to dynamically request and
update minimum resource guarantees for both network
bandwidth and compute resources at runtime. Unlike
previous work, Kraken does not require prior knowledge
about the resource needs of the tenants' applications
but allows tenants to modify their reservation at
runtime. Kraken achieves this through an online
resource reservation scheme, and by optimally embedding
and reconfiguring virtual networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{He:2015:LSD,
author = "Keqiang He and Junaid Khalid and Sourav Das and Aaron
Gember-Jacobson and Chaithan Prakash and Aditya Akella
and Li Erran Li and Marina Thottan",
title = "Latency in Software Defined Networks: Measurements and
Mitigation Techniques",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "435--436",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745880",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We conduct a comprehensive measurement study of switch
control plane latencies using four types of production
SDN switches. Our measurements show that control
actions, such as rule installation, have surprisingly
high latency, due to both software implementation
inefficiencies and fundamental traits of switch
hardware. We also propose three measurement-driven
latency mitigation techniques---optimizing route
selection, spreading rules across switches, and
reordering rule installations---to effectively tame the
flow setup latencies in SDN.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fu:2015:TSB,
author = "Yongquan Fu and Ernst Biersack",
title = "Tree-structured {Bloom} Filters for Joint Optimization
of False Positive Probability and Transmission
Bandwidth",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "437--438",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745881",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Bloom filters are frequently used to perform set
queries that test the existence of some items. However,
Bloom filters face a dilemma: the transmission
bandwidth and the accuracy cannot be optimized
simultaneously. This dilemma is particularly severe for
transmitting Bloom filters to remote nodes when the
network bandwidth is limited. We propose a novel Bloom
filter, BloomTree, that consists of a tree-structured
organization of smaller Bloom filters, each one using a
set of independent hash functions. BloomTree spreads
items across levels that are compressed to reduce the
transmission bandwidth need. We investigate in detail
under which conditions BloomTree performs better than
the compressed Bloom filter and the standard Bloom
filter.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghaderi:2015:SSS,
author = "Javad Ghaderi and Sanjay Shakkottai and Rayadurgam
Srikant",
title = "Scheduling Storms and Streams in the Cloud",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "439--440",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745882",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by emerging big streaming data processing
paradigms (e.g., Twitter Storm, Streaming MapReduce),
we investigate the problem of scheduling graphs over a
large cluster of servers. Each graph is a job, where
nodes represent compute tasks and edges indicate
data-flows between these compute tasks. Jobs (graphs)
arrive randomly over time, and upon completion, leave
the system. When a job arrives, the scheduler needs to
partition the graph and distribute it over the servers
to satisfy load balancing and cost considerations.
Specifically, neighboring compute tasks in the graph
that are mapped to different servers incur load on the
network; thus a mapping of the jobs among the servers
incurs a cost that is proportional to the number of
``broken edges''. We propose a low-complexity randomized
scheduling algorithm that, without service preemptions,
stabilizes the system with graph arrivals/departures;
more importantly, it allows a smooth trade-off between
minimizing average partitioning cost and average queue
lengths. Interestingly, to avoid service preemptions,
our approach does not rely on a Gibbs sampler; instead,
we show that the corresponding limiting invariant
measure has an interpretation stemming from a loss
system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meirom:2015:LED,
author = "Eli A. Meirom and Chris Milling and Constantine
Caramanis and Shie Mannor and Sanjay Shakkottai and
Ariel Orda",
title = "Localized Epidemic Detection in Networks with
Overwhelming Noise",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "441--442",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745883",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of detecting an epidemic in a
population where individual diagnoses are extremely
noisy. We show that exclusively local, approximate
knowledge of the contact network suffices to accurately
detect the epidemic. The motivation for this problem is
the plethora of examples (influenza strains in humans,
or computer viruses in smartphones, etc.) where
reliable diagnoses are scarce, but noisy data
plentiful. In flu or phone-viruses, exceedingly few
infected people/phones are professionally diagnosed
(only a small fraction go to a doctor) but less
reliable secondary signatures (e.g., people staying
home, or greater-than-typical upload activity) are more
readily available. Our algorithm requires only
local-neighbor knowledge of this graph, and in a broad
array of settings that we describe, succeeds even when
false negatives and false positives make up an
overwhelming majority of the data available. Our
results show it succeeds in the presence of partial
information about the contact network, and also when
there are many (hundreds, in our examples) initial
patients-zero.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2015:PBE,
author = "Zhi Zhou and Fangming Liu and Zongpeng Li",
title = "Pricing Bilateral Electricity Trade between Smart
Grids and Hybrid Green Datacenters",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "443--444",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745884",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Datacenter demand response is envisioned as a
promising approach for mitigating operational
instability faced by smart grids. It enables
significant potentials in peak load shedding and
facilitates the incorporation of distributed generation
and intermittent energy sources. This work considers
two key aspects towards realtime electricity pricing
for eliciting demand response: (i) Two-way electricity
flow between smart grids and large datacenters with
hybrid green generation capabilities. (ii) The
geo-distributed nature of large cloud systems, and
hence the potential competition among smart grids that
serve different datacenters of the cloud. We propose a
pricing scheme tailored for geo-distributed green
datacenters, from a multi-leader single-follower game
point of view. At the cloud side, in quest for
performance, scalability and robustness, the energy
cost is minimized in a distributed manner, based on the
technique of the alternating direction method of multipliers
(ADMM). At the smart grid side, a practical equilibrium
of the pricing game is desired. To this end, we employ
mathematical programming with equilibrium constraints
(MPEC), equilibrium problem with equilibrium
constraints (EPEC) and exact linearization, to
transform the multi-leader single-follower pricing game
into a mixed integer linear program (MILP) that can be
readily solved. The effectiveness of the proposed
solutions is evaluated based on trace-driven
simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnasamy:2015:DSR,
author = "Subhashini Krishnasamy and Rajat Sen and Sewoong Oh
and Sanjay Shakkottai",
title = "Detecting Sponsored Recommendations",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "445--446",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745885",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Personalized recommender systems provide great
opportunities for targeted advertisements, by
displaying ads alongside genuine recommendations. We
consider a biased recommendation system where such ads
are displayed without any tags (disguised as genuine
recommendations), rendering them indistinguishable to
users. We consider the problem of detecting such a bias
and propose an algorithm that uses statistical analysis
based on binary feedback data from a subset of users.
We prove that the proposed algorithm detects bias with
high probability for a broad class of recommendation
systems with a sufficient number of feedback samples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhao:2015:UPP,
author = "Yong Zhao and Jia Rao and Xiaobo Zhou and Qing Yi",
title = "Understanding Parallel Performance Under Interferences
in Multi-tenant Clouds",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "447--448",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745886",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of parallel programs is notoriously
difficult to reason about in virtualized environments.
Although performance degradations caused by
virtualization and interference have been well
studied, there is little understanding of why different
parallel programs have unpredictable slowdowns. We
find that unpredictable performance is the result of
complex interplays between the design of the program,
the memory hierarchy of the hosting system, and the CPU
scheduling in the hypervisor. We develop a profiling
tool, vProfile, to decompose parallel runtime into
three parts: compute, steal and synchronization. With
the help of time breakdown, we devise two optimizations
at the hypervisor to reduce slowdowns.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:2015:CIP,
author = "Rui Wu and Jiaming Xu and Rayadurgam Srikant and
Laurent Massoulie and Marc Lelarge and Bruce Hajek",
title = "Clustering and Inference From Pairwise Comparisons",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "449--450",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745887",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Given a set of pairwise comparisons, the classical
ranking problem computes a single ranking that best
represents the preferences of all users. In this paper,
we study the problem of inferring individual
preferences, arising in the context of making
personalized recommendations. In particular, we assume
users form clusters; users of the same cluster provide
similar pairwise comparisons for the items according to
the Bradley-Terry model. We propose an efficient
algorithm to estimate the preference for each user:
first, compute the net-win vector for each user using
the comparisons; second, cluster the users based on the
net-win vectors; third, estimate a single preference
for each cluster separately. We show that the net-win
vectors are much less noisy than the high dimensional
vectors of pairwise comparisons, therefore our
algorithm can cluster the users reliably. Moreover, we
show that, when a cluster is only approximately
correct, the maximum likelihood estimation for the
Bradley-Terry model is still close to the true
preference.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Venkatakrishnan:2015:DNO,
author = "Shaileshh Bojja Venkatakrishnan and Pramod Viswanath",
title = "Deterministic Near-Optimal {P2P} Streaming",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "451--452",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745888",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider live-streaming over a peer-to-peer network
in which peers are allowed to enter or leave the system
adversarially and arbitrarily. Previous approaches for
streaming have either used randomized distribution
graphs or structured trees with randomized maintenance
algorithms. Randomized graphs handle peer churn well
but have only probabilistic connectivity guarantees,
while structured trees have good connectivity but have
proven hard to maintain under peer churn. We improve
upon both approaches by presenting a novel distribution
structure with a deterministic and distributed
algorithm for maintenance under peer churn. The
algorithm has a constant repair time for connectivity,
and near optimal delay. As opposed to order results,
the guarantees provided by our algorithm are exact and
hold for any network size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mirhoseini:2015:FTL,
author = "Azalia Mirhoseini and Ebrahim M. Songhori and Bita
Darvish Rouhani and Farinaz Koushanfar",
title = "Flexible Transformations For Learning Big Data",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "453--454",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745889",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes a domain-specific solution for
iterative learning of big and dense (non-sparse)
datasets. A large host of learning algorithms,
including linear and regularized regression techniques,
rely on iterative updates on the data connectivity
matrix in order to converge to a solution. The
performance of such algorithms often degrades severely
when it comes to large and dense data. Massive dense
datasets not only induce an obligatory large number of
arithmetic operations, but they also incur unwanted message
passing cost across the processing nodes. Our key
observation is that despite the seemingly dense
structures, in many applications, data can be
transformed into a new space where sparse structures
become revealed. We propose a scalable data
transformation scheme that enables creating versatile
sparse representations of the data. The transformation
can be tuned to benefit the underlying platform's cost
and constraints. Our evaluations demonstrate
significant improvement in energy usage, runtime, and
memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2015:ECM,
author = "Jian Li and Bainan Xia and Xinbo Geng and Hao Ming and
Srinivas Shakkottai and Vijay Subramanian and Le Xie",
title = "Energy Coupon: a Mean Field Game Perspective on Demand
Response in Smart Grids",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "455--456",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745890",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Umar:2015:DLA,
author = "Ibrahim Umar and Otto Johan Anshus and Phuong Hoai
Ha",
title = "{DeltaTree}: a Locality-aware Concurrent Search Tree",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "457--458",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745891",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Like other fundamental abstractions for
high-performance computing, search trees need to
support both high concurrency and data locality.
However, existing locality-aware search trees based on
the van Emde Boas layout (vEB-based trees) poorly
support concurrent (update) operations. We present
DeltaTree, a practical locality-aware concurrent search
tree that integrates both locality-optimization
techniques from vEB-based trees, and concurrency
optimization techniques from highly-concurrent search
trees. As a result, DeltaTree minimizes data transfer
from memory to CPU and supports high concurrency. Our
experimental evaluation shows that DeltaTree is up to
50\% faster than highly concurrent B-trees on a
commodity Intel high performance computing (HPC)
platform and up to 65\% faster on a commodity ARM
embedded platform.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ahmed:2015:DLE,
author = "Faraz Ahmed and Jeffrey Erman and Zihui Ge and Alex X.
Liu and Jia Wang and He Yan",
title = "Detecting and Localizing End-to-End Performance
Degradation for Cellular Data Services",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "459--460",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745892",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Nowadays mobile device (e.g., smartphone) users not
only have a high expectation on the availability of the
cellular data service, but also increasingly depend on
the high end-to-end (E2E) performance of their
applications. Since the E2E performance of individual
application sessions may vary greatly, depending on
factors such as the cellular network condition, the
content provider, the type/model of the mobile devices,
and the application software, detecting and localizing
service performance degradations in a timely manner at
large scale is of great value to cellular service
providers. In this paper, we build a holistic
measurement system that tracks session-level E2E
performance metrics along with the service attributes
for these factors. Using data collected from a major
cellular service provider, we first model the expected
E2E service performance with a regression based
approach, detect performance degradation conditions
based on the time series of fine-grained measurement
data, and finally localize the service degradation
using association-rule-mining techniques. Our
deployment experience reveals that in 80\% of the
detected problem instances, performance degradation can
be attributed to non-network-location specific factors,
such as a common content provider, or a set of
applications running on certain models of devices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varloot:2015:SGD,
author = "R{\'e}mi Varloot and Ana Bus{\'\i}{\"u}'c and Anne
Bouillard",
title = "Speeding up {Glauber} Dynamics for Random Generation
of Independent Sets",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "461--462",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745893",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The maximum independent set (MIS) problem is a
well-studied combinatorial optimization problem that
naturally arises in many applications, such as wireless
communication, information theory and statistical
mechanics. The MIS problem is NP-hard; thus, many results in
the literature focus on fast generation of maximal
independent sets of high cardinality. One possibility
is to combine Gibbs sampling with coupling from the
past arguments to detect convergence to the stationary
regime. This results in a sampling procedure with time
complexity that depends on the mixing time of the
Glauber dynamics Markov chain. We propose an adaptive
method for random event generation in the Glauber
dynamics that considers only the events that are
effective in the coupling from the past scheme,
accelerating the convergence time of the Gibbs sampling
algorithm. The full paper is available on arXiv.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2015:OEC,
author = "Linquan Zhang and Zongpeng Li and Chuan Wu and Shaolei
Ren",
title = "Online Electricity Cost Saving Algorithms for
Co-Location Data Centers",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "463--464",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work studies the online electricity cost
minimization problem at a co-location data center. A
co-location data center serves multiple tenants who
rent the physical infrastructure within the data center
to run their respective cloud computing services.
Consequently, the co-location operator has no direct
control over power consumption of its tenants, and an
efficient mechanism is desired for eliciting desirable
consumption patterns from the co-location tenants.
Electricity billing faced by a data center is nowadays
based on both the total volume consumed and the peak
consumption rate. This leads to an interesting new
combinatorial optimization structure on the electricity
cost optimization problem, which also exhibits an
online nature due to the definition of peak
consumption. We model and solve the problem through two
approaches: the pricing approach and the auction
approach. For the former, we design an offline
2-approximation algorithm as well as an online
algorithm with a small competitive ratio in most
practical settings. For the latter, we design an
efficient $ (2 + c) $-competitive online algorithm, where $c$
is a system-dependent parameter close to 1.49, and then
convert it into an efficient mechanism that executes in
an online fashion, runs in polynomial time, and
guarantees truthful bidding and $ (2 + 2 c) $-competitiveness
in social cost.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ducoffe:2015:WTC,
author = "Guillaume Ducoffe and Mathias L{\'e}cuyer and Augustin
Chaintreau and Roxana Geambasu",
title = "{Web} Transparency for Complex Targeting: Algorithms,
Limits, and Tradeoffs",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "465--466",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745896",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Big Data promises important societal progress but
exacerbates the need for due process and
accountability. Companies and institutions can now
discriminate between users at an individual level using
collected data or past behavior. Worse, today they can
do so in near perfect opacity. The nascent field of web
transparency aims to develop the tools and methods
necessary to reveal how information is used, however
today it lacks robust tools that let users and
investigators identify targeting using multiple inputs.
In this paper, we formalize for the first time the
problem of detecting and identifying targeting on
combinations of inputs and provide the first algorithm
that is asymptotically exact. This algorithm is
designed to serve as a theoretical foundational block
to build future scalable and robust web transparency
tools. It offers three key properties. First, our
algorithm is service agnostic and applies to a variety
of settings under a broad set of assumptions. Second,
our algorithm's analysis delineates a theoretical
detection limit that characterizes which forms of
targeting can be distinguished from noise and which
cannot. Third, our algorithm establishes fundamental
tradeoffs that lead the way to new metrics for the
science of web transparency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2015:LBO,
author = "Varun Gupta and Ana Radovanovic",
title = "{Lagrangian}-based Online Stochastic Bin Packing",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "467--468",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745897",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by the problem of packing Virtual Machines
on physical servers in the cloud, we study the problem
of online stochastic bin packing under two settings ---
packing with permanent items, and packing under item
departures. In the setting with permanent items, we
present the first truly distribution-oblivious bin
packing heuristic that achieves $ O(\sqrt n) $ regret
compared to OPT for all distributions. Our algorithm is
essentially gradient descent on a suitably defined
Lagrangian relaxation of the bin packing Linear
Program. We also prove guarantees of our heuristic
against non i.i.d. input using a randomly delayed
Lyapunov function to smoothen the input. For the
setting where items eventually depart, we are
interested in minimizing the steady-state number of
bins. Our algorithm extends as is to the case of item
departures. Further, leveraging the Lagrangian
approach, we generalize our algorithm to a setting
where the processing time of an item is inflated by a
certain known factor depending on the configuration it
is packed in.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2015:TCI,
author = "Vani Gupta and Stephen Lee and Prashant Shenoy and
Ramesh Sitaraman and Rahul Urgaonkar",
title = "Towards Cooling {Internet}-Scale Distributed Networks
on the Cheap",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "469--470",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Internet-scale Distributed Networks (IDNs) are large
distributed systems that comprise hundreds of thousands
of servers located around the world. IDNs consume
significant amounts of energy to power their deployed
server infrastructure, and nearly as much energy to
cool that infrastructure. We study the potential
benefits of using renewable open air cooling (OAC) in
an IDN. Our results show that by using OAC, a global
IDN can achieve a 51\% reduction in cooling energy during
summers and a 92\% reduction in the winter.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clapp:2015:SMQ,
author = "Russell Clapp and Martin Dimitrov and Karthik Kumar
and Vish Viswanathan and Thomas Willhalm",
title = "A Simple Model to Quantify the Impact of Memory
Latency and Bandwidth on Performance",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "471--472",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, DRAM technology improvements have
scaled at a much slower pace than processors. While
server processor core counts grow by 33\% to 50\% on
a yearly cadence, DDR4 memory channel bandwidth has
grown at a slower rate, and memory latency has remained
relatively flat for some time. Meanwhile, new computing
paradigms have emerged, which involve analyzing massive
volumes of data in real time and place pressure on the
memory subsystem. The combination of these trends makes
it important for computer architects to understand the
sensitivity of the workload performance to memory
bandwidth and latency. In this paper, we outline and
validate a methodology for quick and quantitative
performance estimation using a real-world workload.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2015:PCH,
author = "Qiumin Xu and Huzefa Siyamwala and Mrinmoy Ghosh and
Manu Awasthi and Tameesh Suri and Zvika Guz and Anahita
Shayesteh and Vijay Balakrishnan",
title = "Performance Characterization of Hyperscale
Applications on {NVMe SSDs}",
journal = j-SIGMETRICS,
volume = "43",
number = "1",
pages = "473--474",
month = jun,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2796314.2745901",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The storage subsystem has undergone tremendous
innovation in order to keep up with the ever-increasing
demand for throughput. NVMe based SSDs are the latest
development in this domain, delivering unprecedented
performance in terms of both latency and peak
bandwidth. Given their superior performance, NVMe
drives are expected to be particularly beneficial for
I/O intensive applications in datacenter installations.
In this paper we identify and analyze the different
factors leading to the better performance of NVMe SSDs.
Then, using databases as the prominent use-case, we
show how these would translate into real-world
benefits. We evaluate both a relational database
(MySQL) and a NoSQL database (Cassandra) and
demonstrate significant performance gains over
best-in-class enterprise SATA SSDs: from 3.5x for TPC-C
and up to 8.5x for Cassandra.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:2015:SLS,
author = "Leana Golubchik and Bert Zwart",
title = "Spatial Loss Systems: Exact Simulation and Rare Event
Behavior",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "3--6",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider spatial marked Poisson arrivals in a
Polish space. These arrivals are accepted or lost in a
general state dependent manner. The accepted arrivals
remain in the system for a random amount of time, where
the individual sojourn times are i.i.d. For such
systems, we develop semi-closed form expressions for
the steady state probabilities that can be seen to be
insensitive to the sojourn time distribution, and that
rely essentially on the static probabilities of marked
Poisson objects meeting the state acceptance criteria.
The latter observation is then exploited to yield
straightforward exact simulation algorithms to sample
from the steady state distribution. In addition, for
the special case where the arrivals are spheres in a
Euclidean space that are lost whenever they overlap
with an existing sphere, we develop large deviations
asymptotics for the probability of observing a large
number of spheres in the system in steady state, under
diverse asymptotic regimes. Applications include
modeling interference in wireless networks and
connectivity in ad-hoc networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Onderwater:2015:LOP,
author = "Martijn Onderwater and Sandjai Bhulai and Rob van der
Mei",
title = "Learning Optimal Policies in {Markov} Decision
Processes with Value Function Discovery",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "7--9",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825239",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe recent progress in our work
on Value Function Discovery (vfd), a novel method for
discovery of value functions for Markov Decision
Processes (mdps). In a previous paper we described how
vfd discovers algebraic descriptions of value functions
(and the corresponding policies) using ideas from the
Evolutionary Algorithm field. A special feature of vfd
is that the descriptions include the model parameters
of the mdp. We extend that work and show how additional
information about the structure of the mdp can be
included in vfd. This alternative use of vfd still
yields near-optimal policies, and is much faster.
Besides increased performance and improved run times,
this approach illustrates that vfd is not restricted to
learning value functions and can be applied more
generally.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vanLeeuwaarden:2015:DWS,
author = "Johan S. H. van Leeuwaarden and Britt W. J. Mathijsen
and Fiona Sloothaak",
title = "Delayed workload shifting in many-server systems",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "10--12",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825240",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by the desire to shift workload during
periods of overload, we extend established square-root
capacity sizing rules for many-server systems in the
Quality-and-Efficiency Driven (QED) regime. We propose
Delayed Workload Shifting (DWS) which has two defining
features: when there are n users in the system, newly
arriving users are no longer admitted directly.
Instead, these users will reattempt getting access
after a stochastic delay until they are successful. The
goal of DWS is to release pressure from the system
during overloaded periods, and indeed we show that the
performance gain can be substantial. We derive
nontrivial corrections to classical QED approximations
to account for DWS, and leverage these to control
stationary and time-varying system behavior.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cecchi:2015:MFA,
author = "F. Cecchi and S. C. Borst and J. S. H. van
Leeuwaarden",
title = "Mean-Field Analysis of Ultra-Dense {CSMA} Networks",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "13--15",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825241",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed algorithms such as CSMA provide a popular
mechanism for sharing the transmission medium among
competing users in large-scale wireless networks.
Conventional models for CSMA that are amenable for
analysis assume that users always have packets to
transmit. In contrast, when users do not compete for
medium access when their buffers are empty, a complex
interaction arises between the activity states and the
buffer contents. We develop a mean-field approach to
investigate this dynamic interaction for networks with
many users. We identify a time-scale separation between
the evolution of the activity states and the buffer
contents, and obtain a deterministic dynamical system
describing the network dynamics on a macroscopic scale.
The fixed point of the dynamical system yields highly
accurate approximations for the stationary distribution
of the buffer contents and packet delay, even when the
number of users is relatively moderate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Patch:2015:PFL,
author = "Brendan Patch and Thomas Taimre and Yoni Nazarathy",
title = "Performance of Faulty Loss Systems with Persistent
Connections",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "16--18",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825242",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a finite capacity Erlang loss system that
alternates between active and inactive states according
to a two state modulating Markov process. Work arrives
to the system as a Poisson process but is blocked from
entry when the system is at capacity or inactive.
Blocked jobs cost the owner a fixed amount that depends
on whether blockage was due to the system being at
capacity or due to the system being inactive. Jobs
which are present in the system when it becomes
inactive pause processing until the system becomes
active again. A Laplace transform expression for the
expected undiscounted revenue lost in $ [0, t] $ due to
blocking is found. Further, an expression for the total
time discounted expected lost revenue in $ [0, \infty) $ is
provided. We also derive a second order approximation
to the former that can be used when the computing power
to invert the Laplace transform is not available. These
expressions can be used to ascribe a value to four
alternatives for improving system performance: (i)
increasing capacity, (ii) increasing the service rate,
(iii) increasing the repair rate, or (iv) decreasing
the failure rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shneer:2015:SII,
author = "Seva Shneer and Peter M. van de Ven",
title = "Stability and instability of individual nodes in
multi-hop wireless {CSMA\slash CA} networks",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "19--21",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825243",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "CSMA/CA is a popular random-access algorithm for
wireless networks, but its stability properties are
poorly understood. We consider a linear multi-hop
network of three nodes where the neighbouring nodes
interfere with each other and medium access is governed
by the CSMA/CA algorithm. We assume that the source
node is saturated and packets are forwarded through the
network, each node transmitting towards its neighbour
on the right. We demonstrate that the queue of the
second node is saturated (unstable) and the queue of
the third node is stable; this confirms heuristic
arguments and simulation results found in the research
literature. Providing a rigorous proof for the
(in)stability of these nodes is complicated by the fact
that neither queue is Markovian when considered in
isolation, and the two queues are dependent. We then
compute the limiting behavior of node 3, and use this
to determine the end-to-end throughput of the network.
Finally, we vary the access probabilities of the nodes,
and evaluate how this affects the stability and
throughput of the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brun:2015:FMB,
author = "O. Brun and H. Ben Cheikh and B. J. Prabhu",
title = "A Fluid model based Heuristic for Optimal
Speed-scaling of Multi-class Single Server Queues",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "22--23",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825245",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate the energy-delay tradeoff in
multi-class queues in which the server can regulate its
speed according to the load of the system. Assuming
that the queue is initially congested, we investigate
the rate allocation to the classes that drains out the
queue with minimum total energy and delay cost. We
propose to solve this stochastic problem using a
deterministic fluid approximation. We show that the
optimal-fluid solution follows the well-known $ c \mu $ rule
and obtain an explicit expression for the optimal
speed. Numerical results show the utility and the
applicability of the fluid-optimal policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
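The $ c \mu $ rule invoked in the abstract above is the classical
priority index for multi-class single-server queues; as a brief
reminder (the standard textbook statement, not the paper's fluid
derivation), with $c_k$ the holding-cost rate and $\mu_k$ the
service rate of class $k$, the rule is
\[
   \text{serve, at each service opportunity, the nonempty class }
   k^{*} = \arg\max_{k} \, c_k \mu_k ,
\]
i.e., drain classes in decreasing order of $c_k \mu_k$. For the
multi-class M/G/1 queue with linear holding costs this ordering
minimizes the long-run average holding cost; the abstract states
that the fluid-optimal speed-scaling solution follows the same
ordering.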
@Article{Jonckheere:2015:GBA,
author = "Matthieu Jonckheere and Seva Shneer",
title = "Gradient bandwidth allocations",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "24--25",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825246",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We look at bandwidth-sharing networks where bandwidth
allocations are not known to maximize a priori any
utility function. Instead, we only require the
allocation functions to be 0-homogeneous and concave,
which are desirable properties in many situations. We
show that a certain gradient condition is necessary and
sufficient for such allocations to solve an
optimization problem leading to important corollaries
such as deriving the stability set of these
0-homogeneous concave allocations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kleinrouweler:2015:MES,
author = "Jan Willem Kleinrouweler and Sergio Cabrero and Rob
van der Mei and Pablo Cesar",
title = "Modeling the Effect of Sharing Policies for
Network-assisted {HTTP} Adaptive Video Streaming",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "26--27",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825247",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Patel:2015:HLR,
author = "Naresh M. Patel",
title = "Half-Latency Rule for Finding the Knee of the Latency
Curve",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "28--29",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825248",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Latency curves for computer systems typically rise
rapidly beyond some threshold utilization and many
mathematical methods have been suggested to find this
knee, but none seem to match common practice. This
paper proposes a trade-off metric called ATP (an
alternative to Kleinrock's power metric) which
generates a half-latency rule for calculating the
location of the knee for a latency curve. Exact
analysis with this approach applied to the simplest
single-server queue results in an optimal server
utilization of 71.5\%, which is close to the 70\%
utilization used in practice. The half-latency rule
also applies to practical situations that generate a
discrete set of throughput and latency measurements.
The discrete use cases include both production systems
(for provisioning new work) and lab systems (for
summarizing the entire latency curve into a single
figure of merit for each workload and system
configuration).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
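For context on the knee-of-the-curve metric above: Kleinrock's
power metric, which the abstract contrasts with ATP, places the
knee of the simplest single-server queue at 50\% utilization. A
minimal worked example (standard M/M/1 algebra, not the paper's
ATP analysis): with utilization $\rho$, throughput $X = \rho\mu$
and mean latency $T = 1/(\mu(1-\rho))$, so the power is
\[
   P(\rho) \;=\; \frac{X}{T} \;=\; \mu^{2}\,\rho\,(1-\rho),
\]
which is maximized at $\rho^{*} = 0.5$; the half-latency rule
proposed in the paper instead yields a knee at roughly 71.5\%
utilization, much closer to the 70\% figure used in practice.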
@Article{Touati:2015:AJS,
author = "Mikael Touati and Rachid Elazouzi and Marceau
Coupechoux and Eitan Altman and Jean-Marc Kelif",
title = "About Joint Stable User Association and Resource
Allocation in Multi-Rate {IEEE 802.11 WLANs}",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "30--31",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825249",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper aims at proposing an alternative approach
for both the modeling of the IEEE 802.11 resource
allocation scheme and the design of mechanisms to
reduce the impact of the anomaly of the protocol. We
use game theory to model the IEEE 802.11 resource
allocation and the association of mobile users to APs as a
coalition matching game. We propose a new mechanism
that gives mobile users and APs the incentive to
associate with each other in a way that both absorbs
the load and reduces the negative impact of the anomaly
in IEEE 802.11.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:2015:AER,
author = "Huaming Wu and Yi Sun and Katinka Wolter",
title = "Analysis of the Energy-Response Time Tradeoff for
Delayed Mobile Cloud Offloading",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "33--35",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825251",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop a delayed offloading model to leverage the
complementary strength of WiFi and cellular networks
when choosing heterogeneous wireless interfaces for
offloading. Optimality analysis of the energy-delay
tradeoff is carried out by using a queueing model with
impatient jobs and service interruptions, which
captures both energy and performance metrics and also
intermittently available access links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2015:GMT,
author = "Niangjun Chen and Xiaoqi Ren and Shaolei Ren and Adam
Wierman",
title = "Greening Multi-Tenant Data Center Demand Response",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "36--38",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825252",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Data centers have become critical resources for
emergency demand response (EDR). However, currently,
data centers typically participate in EDR by turning on
backup (diesel) generators, which are both expensive
and environmentally unfriendly. In this paper, we focus
on ``greening'' demand response in multi-tenant data
centers by incentivizing tenants' load reduction and
reducing on-site diesel generation. Our proposed
mechanism, ColoEDR, which is based on a parameterized
supply function mechanism, provides provably
near-optimal efficiency guarantees, both when tenants
are price-taking and when they are
price-anticipating.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2015:PSD,
author = "Shaoquan Zhang and Longbo Huang and Minghua Chen and
Xin Liu",
title = "Proactive Serving Decreases User Delay Exponentially",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "39--41",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825253",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In online service systems, the delay experienced by a
user from the service request to the service completion
is one of the most critical performance metrics. To
improve user delay experience, recent industrial
practice suggests a modern system design mechanism:
proactive serving, where the system predicts future
user requests and allocates its capacity to serve these
upcoming requests proactively. In this paper, we
investigate the fundamentals of proactive serving from
a theoretical perspective. In particular, we show that
proactive serving decreases average delay exponentially
(as a function of the prediction window size). Our
results provide theoretical foundations for proactive
serving and shed light on its application in practical
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ren:2015:SAC,
author = "Xiaoqi Ren and Ganesh Ananthanarayanan and Adam
Wierman and Minlan Yu",
title = "Speculation-aware Cluster Scheduling",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "42--44",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825254",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stragglers are a crucial roadblock to achieving
predictable performance in today's clusters.
Speculation has been widely adopted in order to
mitigate the impact of stragglers; however speculation
mechanisms are designed and operated independently of
job scheduling when, in fact, scheduling a speculative
copy of a task has a direct impact on the resources
available for other jobs. In this work, based on a
simple model and its analysis, we design Hopper, a job
scheduler that is speculation-aware, i.e., that
integrates the tradeoffs associated with speculation
into job scheduling decisions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2015:MLE,
author = "Weikun Wang and Giuliano Casale",
title = "Maximum Likelihood Estimation of Closed Queueing
Network Demands from Queue Length Data",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "45--47",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825255",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose maximum likelihood (ML) estimators for
service demands in closed queueing networks with
load-independent and load-dependent stations. Our ML
estimators are expressed in implicit form and require
only to compute mean queue lengths and marginal queue
length probabilities from an empirical dataset.
Further, in the load-independent case, we provide an
explicit approximate formula for the ML estimator
together with confidence intervals.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kesidis:2015:NCP,
author = "G. Kesidis and Y. Shan and B. Urgaonkar and J.
Liebeherr",
title = "Network calculus for parallel processing",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "48--50",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825256",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present preliminary results on the
use of ``network calculus'' for parallel processing
(fork join) systems, e.g., MapReduce. We derive a
probabilistic bound that the delay through a single
parallel processing stage exceeds a threshold.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
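The abstract above does not state the bound itself; as a rough
illustration of the type of tail bound involved (a generic
union-bound sketch under assumed per-task delay bounds, not
necessarily the form derived in the paper), if a job forks into
$m$ parallel tasks with delays $D_1, \ldots, D_m$ and joins on
the slowest of them, then
\[
   \Pr\Bigl( \max_{1 \le i \le m} D_i > \tau \Bigr)
      \;\le\; \sum_{i=1}^{m} \Pr( D_i > \tau ),
\]
where stochastic network calculus supplies the per-task tail
bounds $\Pr(D_i > \tau)$ from arrival and service envelopes.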
@Article{Fiorini:2015:EAS,
author = "Pierre M. Fiorini and Lester Lipsky",
title = "Exact Analysis of Some Split-Merge Queues",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "51--53",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825257",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joshi:2015:QRL,
author = "Gauri Joshi and Emina Soljanin and Gregory Wornell",
title = "Queues with Redundancy: Latency-Cost Analysis",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "54--56",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825258",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berger:2015:MCH,
author = "Daniel S. Berger and Sebastian Henningsen and Florin
Ciucu and Jens B. Schmitt",
title = "Maximizing Cache Hit Ratios by Variance Reduction",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "57--59",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825259",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TTL cache models provide an attractive unified
approximation framework for caching policies like LRU
and FIFO, whose exact analysis is notoriously hard. In
this paper, we advance the understanding of TTL models
by explicitly considering stochastic capacity
constraints. We find in particular that reducing the
variance of the cache occupancy is instrumental to
optimize the cache hit ratio in an online setting. To
enforce such a desired low variance, we propose a novel
extension of the TTL model by rewarding popular objects
with longer TTLs. An attractive feature of the proposed
model is that it remains closed under an exact network
analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
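To make the TTL caching model above concrete, the following is a
minimal simulation sketch in Python (all parameter values are
hypothetical and chosen only for illustration; it implements a
plain fixed-TTL cache under Poisson requests with Zipf popularity
and reports the hit ratio together with the occupancy variance
that the paper seeks to reduce, not the paper's
popularity-dependent TTL extension):

    import random

    def simulate_ttl_cache(num_objects=500, ttl=20.0, request_rate=50.0,
                           zipf_alpha=0.8, horizon=500.0, seed=0):
        rng = random.Random(seed)
        # Truncated Zipf popularity weights over the object catalogue.
        weights = [1.0 / (i + 1) ** zipf_alpha for i in range(num_objects)]
        expiry = {}                 # object id -> absolute expiry time
        hits = requests = 0
        occ_sum = occ_sqsum = 0.0
        t = 0.0
        while t < horizon:
            t += rng.expovariate(request_rate)   # Poisson request arrivals
            obj = rng.choices(range(num_objects), weights=weights)[0]
            expiry = {o: e for o, e in expiry.items() if e > t}  # lazy eviction
            requests += 1
            if obj in expiry:
                hits += 1                        # object still cached: hit
            else:
                expiry[obj] = t + ttl            # miss: cache for one TTL
            occ = len(expiry)                    # occupancy sampled per request
            occ_sum += occ
            occ_sqsum += occ * occ
        mean_occ = occ_sum / requests
        var_occ = occ_sqsum / requests - mean_occ ** 2
        return hits / requests, mean_occ, var_occ

    if __name__ == "__main__":
        hit, mean_occ, var_occ = simulate_ttl_cache()
        print(f"hit ratio {hit:.3f}, occupancy mean {mean_occ:.1f}, "
              f"variance {var_occ:.1f}")

Rewarding popular objects with longer TTLs, as proposed in the
paper, would replace the fixed ttl above with a per-object value.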
@Article{Tan:2015:MBC,
author = "Jian Tan and Li Zhang and Yandong Wang",
title = "Miss behavior for caching with lease",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "60--62",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825260",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Caching with lease is to evict the data record from
cache after its associated lease term expires. This
policy differs from the traditional caching algorithms,
e.g., LRU, by introducing a dimension of time to the
data record stored in the cache. This model has
recently attracted increasing interest not only from a
theoretical perspective, but also in real system
implementation. For the related theoretical studies,
lease of each data record, also known as cache
characteristic time and Time-To-Live (TTL), provides a
convenient approximation that can simplify the
complexity in analyzing popular caching algorithms such
as LRU. This approach ignores the finite capacity of
the cache and assumes the lease term to be a known
parameter that matches the measurements. Recently,
with new development in system engineering, caching
with lease has been shown to be an efficient way to
improve the performance of RDMA based key-value stores.
This engineering practice imposes new challenges for
designing caching algorithms based on lease. It calls
for further theoretical investigation on the lease term
in presence of a finite cache capacity. To this end, we
derive the miss probabilities for caching with lease
compared to LRU, when the request frequencies of the
data records follow a generalized Zipf's law.
Based on the miss probability depending on the lease
term, we also discuss adaptive algorithms that can
optimally determine the lease term.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
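The ``characteristic time'' mentioned in the abstract above is
usually formalized via the Che approximation; in its standard
form (stated here as background, independent of the
finite-capacity analysis in the paper), an LRU cache of capacity
$C$ serving independent Poisson request streams with per-object
rates $\lambda_i$ behaves approximately like a TTL cache whose
common lease term $T_C$ solves
\[
   \sum_{i} \bigl( 1 - e^{-\lambda_i T_C} \bigr) \;=\; C ,
   \qquad
   \Pr(\text{hit on object } i) \;\approx\; 1 - e^{-\lambda_i T_C} .
\]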
@Article{Yang:2015:OGG,
author = "Yudong Yang and Vishal Misra and Dan Rubenstein",
title = "On the Optimality of Greedy Garbage Collection for
{SSDs}",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "63--65",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825261",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Solid state drives have been widely applied in modern
computer systems. The lifetime of the SSD depends
heavily on the efficiency of the implementation of the
garbage collection (GC) algorithm that reclaims
previously used pages. In this paper, we present the
first detailed proof that the greedy GC algorithm has
the optimal performance (minimized write amplification)
for memoryless workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
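A minimal sketch of the greedy garbage-collection policy analyzed
above (illustrative only: block and page counts are hypothetical,
and the workload is the uniformly random, memoryless overwrite
pattern assumed in the abstract). The policy always erases the
block holding the fewest valid pages, and write amplification is
measured as physical writes divided by user writes:

    import random

    def greedy_gc_write_amplification(num_blocks=64, pages_per_block=128,
                                      spare_factor=0.10, user_writes=200000,
                                      seed=0):
        rng = random.Random(seed)
        total_pages = num_blocks * pages_per_block
        num_logical = int(total_pages * (1.0 - spare_factor))
        block_of = {}                  # logical page -> block with its valid copy
        valid = [0] * num_blocks       # valid pages per block
        used = [0] * num_blocks        # programmed (valid + stale) pages per block
        physical_writes = 0

        def program(page):
            # Write one logical page into a block with a clean slot,
            # running greedy GC first if no clean slot exists.
            nonlocal physical_writes
            target = next((b for b in range(num_blocks)
                           if used[b] < pages_per_block), None)
            if target is None:
                # Greedy policy: erase the block with the fewest valid pages.
                victim = min(range(num_blocks), key=lambda b: valid[b])
                movers = [p for p, b in block_of.items() if b == victim]
                used[victim] = valid[victim] = 0
                for p in movers:       # relocations inflate the write count
                    del block_of[p]
                    program(p)
                target = next(b for b in range(num_blocks)
                              if used[b] < pages_per_block)
            used[target] += 1
            valid[target] += 1
            block_of[page] = target
            physical_writes += 1

        for p in range(num_logical):   # fill the drive once (no GC yet)
            program(p)
        baseline = physical_writes
        for _ in range(user_writes):   # memoryless, uniformly random overwrites
            p = rng.randrange(num_logical)
            valid[block_of[p]] -= 1    # the old copy becomes stale
            del block_of[p]
            program(p)
        return (physical_writes - baseline) / user_writes

    if __name__ == "__main__":
        print(f"write amplification ~ {greedy_gc_write_amplification():.2f}")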
@Article{Spencer:2015:ILM,
author = "Sam Spencer and R. Srikant",
title = "On the Impossibility of Localizing Multiple Rumor
Sources in a Line Graph",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "66--68",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825262",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Here we examine the problem of rumor source
identification in line graphs. We assume the SI model
for rumor propagation with exponential waiting times.
We consider the case where a rumor originates from two
sources simultaneously, and evaluate the likelihood
function for the given observations given those
sources. As the size of the infected region grows
arbitrarily large, we show that unlike the single
source case, where the likelihood function concentrates
near the midpoint of the infected region, the support
of the likelihood function in this case remains widely
distributed over the middle half of the infected
region. This makes the rumor sources impossible to
localize with high probability on any scale smaller
than that of the infection size itself.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2015:PTC,
author = "Nicolas Gast",
title = "The Power of Two Choices on Graphs: the
Pair-Approximation is Accurate?",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "69--71",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825263",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maguluri:2015:HTB,
author = "Siva Theja Maguluri and R. Srikant",
title = "Heavy-Traffic Behavior of the {MaxWeight} Algorithm in
a Switch with Uniform Traffic",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "72--74",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825264",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a switch with uniform traffic operating
under the MaxWeight scheduling algorithm. This traffic
pattern is interesting to study in the heavy-traffic
regime since the queue lengths exhibit a
multi-dimensional state-space collapse. We use a
Lyapunov-type drift technique to characterize the
heavy-traffic behavior of the expectation of the sum
queue lengths in steady-state. Specifically, in the
case of Bernoulli arrivals, we show that the
heavy-traffic scaled queue length is $ (n - 3/2 + 1/(2n)) $.
Our result implies that the MaxWeight algorithm
has optimal queue-length scaling behavior in the
heavy-traffic regime with respect to the size of a
switch with a uniform traffic pattern. This settles the
heavy-traffic version of an open conjecture.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Busic:2015:AOB,
author = "Ana Bu{\v{s}}i{\'c} and Sean Meyn",
title = "Approximate optimality with bounded regret in dynamic
matching models",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "75--77",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825265",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2015:CEL,
author = "Yingdong Lu and Mark S. Squillante and Chai Wah Wu and
Bo Zhang",
title = "On the Control of Epidemic-Like Stochastic Processes
with Time-Varying Behavior",
journal = j-SIGMETRICS,
volume = "43",
number = "2",
pages = "78--80",
month = sep,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2825236.2825266",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 18 06:59:51 MDT 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Canini:2015:HMP,
author = "Marco Canini and James Kempf and Stefan Schmid",
title = "How many planet-wide leaders should there be?",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "3--6",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847222",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Geo-replication becomes increasingly important for
modern planetary scale distributed systems, yet it
comes with a specific challenge: latency, bounded by
the speed of light. In particular, clients of a
geo-replicated system must communicate with a leader
which must in turn communicate with other replicas:
wrong selection of a leader may result in unnecessary
round-trips across the globe. Classical protocols, such
as the celebrated Paxos, have a single leader, making them
unsuitable for serving widely dispersed clients. To
address this issue, several all-leader geo-replication
protocols have been proposed recently, in which every
replica acts as a leader. However, because these
protocols require coordination among all replicas,
committing a client's request at some replica may incur
the so-called ``delayed commit'' problem, which can
introduce an even higher latency than a classical
single-leader majority-based protocol such as Paxos. In
this paper, we argue that the ``right'' choice of the
number of leaders in a geo-replication protocol depends
on a given replica configuration and propose Droopy, an
optimization for state machine replication protocols
that explores the space between single-leader and
all-leader by dynamically reconfiguring the leader set.
We implement Droopy on top of Clock-RSM, a
state-of-the-art all-leader protocol. Our evaluation on
Amazon EC2 shows that, under typical imbalanced
workloads, Droopy-enabled Clock-RSM efficiently reduces
latency compared to native Clock-RSM, whereas in other
cases the latency is the same as that of the native
Clock-RSM.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2015:USR,
author = "Da Wang and Gauri Joshi and Gregory Wornell",
title = "Using Straggler Replication to Reduce Latency in
Large-scale Parallel Computing",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "7--11",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847223",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In cloud computing jobs consisting of many tasks run
in parallel, the tasks on the slowest machines
(straggling tasks) become the bottleneck in the
completion of the job. One way to combat the
variability in machine response time is to add replicas
of straggling tasks and wait for the earliest copy to
finish. Using the theory of extreme order statistics,
we analyze how task replication reduces latency, and
its impact on the cost of computing resources. We also
propose a heuristic algorithm to search for the best
replication strategies when it is difficult to model
the empirical behavior of task execution time and use
the proposed analysis techniques. Evaluation of the
heuristic policies on Google Trace data shows a
significant latency reduction compared to the
replication strategy used in MapReduce.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
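As a toy illustration of the order-statistics reasoning sketched
above (a textbook special case with replicas launched up front and
cancelled on first completion, not the paper's general analysis):
if the execution time of each copy of a task is i.i.d. exponential
with rate $\mu$ and $r$ replicas are launched, then
\[
   \mathbb{E}[\text{latency}]
      \;=\; \mathbb{E}\Bigl[ \min_{1 \le j \le r} X_j \Bigr]
      \;=\; \frac{1}{r\mu},
   \qquad
   \mathbb{E}[\text{machine time}]
      \;=\; r \cdot \frac{1}{r\mu} \;=\; \frac{1}{\mu},
\]
so for purely memoryless task times replication cuts latency by a
factor of $r$ at no extra expected computing cost; heavier-tailed
or shifted execution-time distributions change this trade-off,
which is the latency-cost analysis the paper carries out.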
@Article{Gandhi:2015:ANA,
author = "Anshul Gandhi and Justin Chan",
title = "Analyzing the Network for {AWS} Distributed Cloud
Computing",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "12--15",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847224",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cloud computing is a global service. Cloud Service
Providers, such as AWS, allow users to launch VM
instances on multiple data centers (regions) around the
world. However, the network connectivity and bandwidth
between these different geographically distributed
regions varies significantly depending on the user's
location. In this paper, we analyze the network
performance between pairs of AWS instances hosted on
all available regions. We leverage our analysis to
derive the optimal hosting region for web service
providers depending on the customer locations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jia:2015:PCA,
author = "Rui Jia and Sherif Abdelwahed and Abdelkarim Erradi",
title = "A Predictive Control Approach for Fault Management of
Computing Systems",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "16--20",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847225",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, a model-based predictive control
approach for fault management in computing systems is
presented. The proposed approach can incorporate
existing fault diagnosis methods and fault recovery
actions to facilitate the recovery process. When a
fault is identified, the proposed algorithm uses
utility cost functions to compute the optimal recovery
solution that minimizes fault impacts on the system's
Quality of Service. The proposed approach has been
demonstrated on a Web service testbed under various
faults.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Netto:2015:ARI,
author = "Hylson Vescovi Netto and Lau Cheuk Lung and Tulio
Alberton Ribeiro and Miguel Correia and Aldelir
Fernando Luiz",
title = "Anticipating Requests to Improve Performance and
Reduce Costs in Cloud Storage",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "21--24",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847226",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Clouds are a suitable place to store data with
scalability and financial flexibility. However, it is
difficult to ensure the reliability of the data stored
in a cloud. Byzantine fault tolerance can improve
reliability, but at a high cost. This paper presents a
technique that anticipates requests in order to reduce
that cost. We show that this technique improves the
performance in comparison with related works and
maintains the desired data reliability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lavi:2015:ARP,
author = "Nadav Lavi and Hanoch Levy",
title = "Admit or Reject? {Preserve} or Drop?: {Operational}
Dilemmas upon Server Failures on the Cloud",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "25--29",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847227",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Server failures on the cloud introduce acute
operational dilemmas as now the cloud management entity
needs to handle existing task preservations in addition
to new task admissions. These admission and
preservation decisions have significant impact on the
cloud performance and operational cost, as they impact
future system decisions. Should a cloud manager prefer
to use resources for new task admissions and increase
the risk of dropping an already admitted task in the
future? Or should he/she prefer to maintain resources
for potential future task preservations at the expense
of new task admissions? These dilemmas are even more
critical in Distributed Cloud Computing (DCC) due to
the small scale of the micro Cloud Computing Center
(mCCC). In this paper we will address these questions
through the use of Markov Decision Process (MDP)
analysis. We will show that even though the problem
appears to be rather complicated (as the two decision
rules are coupled), our analysis reveals that it can be
significantly simplified (as one of the rules is of a
trivial form). These results enable us to compose a
holistic framework for cloud computing task
management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2015:TDT,
author = "Tianrong Zhang and Yufeng Xin",
title = "Towards Designing a Truthful Online Auction Framework
for Deadline-aware Cloud Resource Allocation",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "30--33",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847228",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Auction-based resource allocation mechanisms have been
believed as promising approaches to effectively
managing cloud resource supply and demand, and thus
recently attracted substantial research interests. In
this paper, we present a novel online auction framework
for deadline-aware cloud resource allocation. Our
framework consists of two major parts. We first design
an offline auction mechanism taking into account
deadline requirements from different resource requests,
and further present an online framework built on top of
the offline auction mechanism. Our theoretical analysis
shows that both the offline and online auction
mechanisms are truthful.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tran:2015:CCD,
author = "Nguyen H. Tran and Cuong T. Do and Choong Seon Hong
and Shaolei Ren and Zhu Han",
title = "Coordinated Colocation Datacenters for Economic Demand
Response",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "34--37",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847229",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Demand response of datacenters recently has received
increasing efforts due to huge demands and flexible
power control knobs. However, very few works focus on a
critical segment of datacenter business: multi-tenant
colocation. In colocation datacenters, while there
exist multiple tenants who manage their own servers,
the colocation operator only provides other facilities
such as cooling, reliable power, and network
connectivity. Therefore, colocation has a unique
challenge for the demand response: uncoordinated power
management among tenants. To tackle this challenge, we
study how to coordinate tenants for economic demand
response. We show that there is an interaction between
the operator and tenants' strategies, where each side
maximizes its own benefit. Hence, we apply a two-stage
Stackelberg game to analyze this scenario and derive
this game's equilibria. Finally, trace-based
simulations are also provided to illustrate the
efficacy of our proposed incentive schemes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ludwig:2015:DCM,
author = "Arne Ludwig and Stefan Schmid",
title = "Distributed Cloud Market: Who Benefits from
Specification Flexibilities?",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "38--41",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847230",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Virtualization is arguably the main innovation motor
in the Internet today. Virtualization enables the
decoupling of applications from the physical
infrastructure, and introduces new mapping and
scheduling flexibilities. While the corresponding
algorithmic problems are fairly well-understood, we
ask: Who reaps the benefits from the virtualization
flexibilities? We introduce two simple distributed
cloud market models and study this question in two
dimensions: (1) a horizontal market where different
cloud providers compete for the customer requests, and
(2) a vertical market where a broker resells the
resources of a cloud provider.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mao:2015:DAD,
author = "Bo Mao and Suzhen Wu",
title = "Deduplication-Assisted Data Reduction and Distribution
in Cloud-of-Clouds",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "42--42",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847231",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the increasing popularity and cost-effectiveness
of cloud storage, many companies and organizations have
moved or planned to move data out of their own data
centers into the cloud. However, solely depending on a
particular cloud storage provider has a number of
potentially serious problems [1]. First, it can cause
the so-called vendor lock-in problem for the customers,
which results in prohibitively high cost for clients to
switch from one provider to another. Second, it can
cause service disruptions, which in turn will lead to
SLA violation, due to cloud outages, resulting in
penalties, monetary or other forms, for the service
providers. Third, solely depending on a particular
cloud storage provider can also result in possible
increased service costs and data security issues, such
as the data leakage problem. Thus, using multiple
independent cloud providers, the so-called Cloud-of-Clouds,
is an effective way to provide better availability for
the cloud storage systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gandhi:2015:OLB,
author = "Anshul Gandhi and Naman Mittal and Xi Zhang",
title = "Optimal Load-Balancing for Heterogeneous Clusters",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "43--43",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847232",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Le:2015:ECA,
author = "Tan N. Le and Bong Jun Choi and Pradipta De",
title = "Energy Cost Aware Scheduling of {MapReduce} Jobs
across Geographically Distributed Nodes",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "44--44",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847233",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhojwani:2015:IDC,
author = "Sushil Bhojwani and Matt Hemmings and Dan Ingalls and
Jens Lincke and Robert Krahn and David Lary and Rick
McGeer and Glenn Ricart and Marko Roder and Yvonne
Coady and Ulrike Stege",
title = "The Ignite Distributed Collaborative Visualization
System",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "45--46",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847234",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maille:2015:ICD,
author = "Patrick Maill{\'e} and Bruno Tuffin",
title = "Impact of Content Delivery Networks on Service and
Content Innovation",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "49--52",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847236",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Content Delivery Networks (CDNs) are major actors of
the current telecommunication ecosystem. Our goal in
this paper is to study their impact on other actors of
the supply chain, especially on content innovation
which is a key concern in the network neutrality debate
where CDNs' role seems forgotten. Our findings indicate
that vertically integrating a CDN helps Internet
Service Providers (ISPs) collect fees from Content
Providers (CPs), hence circumventing the interdiction
of side payments coming from net-neutrality rules.
However, this outcome is socially much better in terms
of user quality and innovation fostering than having
separate actors providing the access and CDN services:
in the latter case double marginalization (both ISP and
CDN trying to get some value from the supply chain)
leads to suboptimal investments in CDN storage
capacities and higher prices for CPs, resulting in
reduced innovation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ahuja:2015:PDW,
author = "Kartik Ahuja and Simpson Zhang and Mihaela van der
Schaar",
title = "The Population Dynamics of {Websites}: [Extended
Abstract]",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "53--56",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847237",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Websites derive revenue by advertising or charging
fees for services and so their profit depends on their
user base --- the number of users visiting the website.
But how should websites control their user base? This
paper is the first to address and answer this question.
It builds a model in which, starting from an initial
user base, the website controls the growth of the
population by choosing the intensity of referrals and
targeted ads to potential users. A larger population
provides more profit to the website, but building a
larger population through referrals and targeted ads is
costly; the optimal policy must therefore balance the
marginal benefit of adding users against the marginal
cost of referrals and targeted ads. The nature of the
optimal policy depends on a number of factors. Most
obvious is the initial user base; websites starting
with a small initial population should offer many
referrals and targeted ads at the beginning, but then
decrease referrals and targeted ads over time. Less
obvious factors are the type of website and the typical
length of time users remain on the site: the optimal
policy for a website that generates most of its revenue
from a core group of users who remain on the site for a
long time --- e.g., mobile and online gaming sites ---
should be more aggressive and protective of its user
base than that of a website whose revenue is more
uniformly distributed across users who remain on the
site only briefly. When arrivals and exits are
stochastic, the optimal policy is more aggressive ---
offering more referrals and targeted ads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luo:2015:PPP,
author = "Yuan Luo and Nihar B. Shah and Jianwei Huang and Jean
Walrand",
title = "Parametric Prediction from Parametric Agents?",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "57--57",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Acemoglu:2015:PCN,
author = "Daron Acemoglu and Ali Makhdoumi and Azarakhsh
Malekian and Asu Ozdaglar",
title = "Privacy-Constrained Network Formation",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "58--58",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847239",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramachandran:2015:NEP,
author = "Arthi Ramachandran and Augustin Chaintreau",
title = "The Network Effect of Privacy Choices",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "59--62",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847240",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Afrasiabi:2015:CBP,
author = "M. H. Afrasiabi and R. Gu{\'e}rin",
title = "Choice-based Pricing for User-Provided Connectivity?",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "63--66",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847241",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "User-provided connectivity (UPC) leverages the network
connectivity of its users to build a service offering
that goes beyond their individual connectivity option,
i.e., allows them to roam. Because the service's
overall value typically grows as a function of its
coverage, it is important to devise pricing policies
that make it attractive to all users, even those who
derive little value from roaming. This paper builds on
earlier work that explored the value of a UPC service,
and proposes a new pricing policy, the Price choice
policy, that seeks to realize an effective compromise
between pricing complexity and the policy's ability to
maximize system value and extract profit. The paper
illustrates the benefits of the proposed policy by
demonstrating why and how it outperforms several
previously proposed policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meir:2015:PWG,
author = "Reshef Meir and David Parkes",
title = "Playing the Wrong Game: Smoothness Bounds for
Congestion Games with Behavioral Biases",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "67--70",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847242",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In many situations a player may act so as to maximize
a perceived utility that is not exactly her utility
function, but rather some other, biased, utility.
Examples of such biased utility functions are common in
behavioral economics, and include risk attitudes,
altruism, present-bias and so on. When analyzing a
game, one may ask how inefficiency, measured by the
Price of Anarchy (PoA) is affected by the perceived
utilities. The smoothness method [16, 15] naturally
extends to games with such perceived utilities or
costs, regardless of the game or the behavioral bias.
We show that such biased smoothness is broadly
applicable in the context of nonatomic congestion
games. First, we show that on series-parallel networks
we can use smoothness to yield PoA bounds even for
diverse populations with different biases. Second, we
identify various classes of cost functions and biases
that are smooth, thereby substantially improving some
recent results from the literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
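For readers unfamiliar with the smoothness framework invoked
above, the standard (unbiased) definition and the bound it yields
are as follows (Roughgarden's formulation; the paper's
contribution is the biased extension in which players minimize
perceived rather than true costs): a cost-minimization game is
$(\lambda, \mu)$-smooth, with $\mu < 1$, if for every pair of
outcomes $s$ and $s^{*}$
\[
   \sum_{i} C_i(s_i^{*}, s_{-i})
      \;\le\; \lambda\, C(s^{*}) + \mu\, C(s),
\]
and smoothness immediately bounds the Price of Anarchy of every
Nash (indeed every coarse correlated) equilibrium by
$\lambda / (1 - \mu)$.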
@Article{Feldman:2015:CSE,
author = "Michal Feldman and Ophir Friedler",
title = "Convergence to Strong Equilibrium in Network Design
Games",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "71--71",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847243",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a network design game [1] each agent seeks to
connect two nodes in a directed network at a minimal
cost. The strategies employed by each agent include all
the paths that connect that agent's two nodes (termed
origin and destination). The paths may represent roads,
internet cables, or even water pipelines. The cost of
an edge is a function of the number of agents that use
it. An agent pays the total cost of the edges in its
path, where an edge cost is a function of the number of
agents using the edge. In this work we focus on
nonincreasing edge costs, where agents impose positive
externalities on one another. Such settings emerge in
cases where agents collectively construct a network and
share the cost of the network links. Each network
design game possesses a pure Nash equilibrium (PNE): an
outcome that is sustainable against unilateral
deviations. However, a PNE is not necessarily stable
against coalitional deviations; it is therefore an
inadequate solution concept in settings where agents
are capable of coordinating their actions. The most
well-studied solution concept that is stable against
coalitional deviations is termed strong equilibrium
(SE) [2]. An SE is an outcome where no beneficial
coalitional deviation (BCD) exists (i.e., a deviation
in which each member of the coalition strictly
decreases its cost). Epstein et al. [3] studied the
existence and efficiency of SEs in non-increasing
network design games. They showed that in a
single-origin, any-destination (SOAD) setting (i.e.,
where all agents have the same origin but may have
arbitrary destinations) with a series-parallel (SP)
network [3, 4], an SE is guaranteed to exist. Holzman
and Monderer [4] showed that this result is tight,
i.e., for any network that is not SP, there exists a
non-increasing SOAD network design game that does not
admit an SE. A natural question arises: Given an
arbitrary outcome of an SOAD network design game with
an SP network, can strategic agents converge to an SE
via BCDs? And if so, how fast? Our contribution: We
start by showing that there exist BCD sequences that do
not converge to an SE. We then define a class of BCDs,
termed dominance based BCDs. This class is based on the
notion of domination between agents. In an SOAD
setting, we say that agent i is dominated by agent j if
there is a path from the destination of i to the
destination of j. Thus, domination is a partial order
between the agents. Dominance based BCDs proceed in the
following manner: Take any (full) order of the agents
consistent with the partial order. Every agent i, in
its turn, computes the optimal profile for itself
together with all the successive agents that can
intersect its path (thus reducing its cost). We show
that if such a coalitional deviation reduces its cost,
then every agent in the coalition benefits from the
deviation as well. Therefore, this is a BCD. We show
that any sequence of dominance based BCDs converges to
an SE within at most n iterations (where n is the
number of agents). Moreover, we present an algorithm
that efficiently computes dominance based BCDs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Touati:2015:CSA,
author = "Mikael Touati and Rachid El-Azouzi and Marceau
Coupechoux and Eitan Altman and Jean-Marc Kelif",
title = "Core Stable Algorithms for Coalition Games with
Complementarities and Peer Effects",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "72--75",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847244",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this short paper, we show two new algorithms for
finding stable structures in ordinal coalition
potential games. The first one is anytime and
enumerative. It performs on a graph. The second one is
a modified Deferred Acceptance Algorithm (DAA) using
counter-proposals. It finds a many-to-one matching. We
illustrate with the example of video caching from a
content creator's servers to a service provider's
servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kilcioglu:2015:RMC,
author = "Cinar Kilcioglu and Costis Maglaras",
title = "Revenue Maximization for Cloud Computing Services",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "76--76",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847245",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kulkarni:2015:DCM,
author = "Janardhan Kulkarni and Vahab Mirrokni",
title = "Dynamic Coordination Mechanisms: [Extended Abstract]",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "77--77",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847246",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tavafoghi:2015:SCU,
author = "Hamidreza Tavafoghi and Demosthenis Teneketzis",
title = "Sequential Contracts for Uncertain Electricity
Resources",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "78--81",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847247",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Simhon:2015:ISI,
author = "Eran Simhon and David Starobinski",
title = "On the Impact of Sharing Information in Advance
Reservation Systems",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "82--82",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847248",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Services that allow advance reservations (AR) over the
Internet differ in the information provided to
customers about future availability of servers. In some
services, customers observe the exact number of
currently available servers prior to making decisions.
In other services, customers are only alerted when a
few servers remain available, while there are also
services in which no information whatsoever is shared
about the availability of servers. Examples for the
first case can be found in entertainment services,
where customers are allowed to choose their seats and
observe the exact number of available seats. Examples
for the second case can be found in lodging
reservations websites, such as Booking.com, that alert
potential customers only when a few available rooms are
left. Booking of airline tickets is an example of the
third case where no information is provided (typically,
customers can choose seats but only after buying a
ticket). In recent years, research on the impact of
information on different queueing systems has emerged
(see [1], for example). However, not much is known
about the impact of information in systems that allow
advance reservations. Our goal is to understand how
different information sharing policies affect the
decision of customers whether to reserve a resource in
advance or not. Towards this end, we define a game, in
which customers either reserve a resource in advance or
avoid advance reservation and take the risk that the
resource will not be available when needed. Making
advance reservation is associated with a fixed cost.
This cost can be interpreted as a reservation fee, as
the time or resources required for making the
reservation, or as the cost of financing advance
payment of the service. AR games were introduced in [2]
and further investigated in [3]. In the models
considered in those works, customers are not informed
about the number of available servers. In contrast, in
the present work, we consider a setup where customers
can observe the state of the system prior to making a
reservation. We first study a fully-observable game. In
this game, customers observe the exact number of
available servers. We determine the equilibrium
structure and prove the existence and uniqueness of the
equilibrium. We then consider a semi-observable game.
In this game, the provider informs customers about the
number of available servers only if this number is
smaller than or equal to some threshold. We assume that
customers who are not informed realize that the number
of available servers is greater than the threshold and
take this fact into consideration when making their
decisions. We show that, in this case, there may be
multiple equilibria and the number of equilibria
depends on the AR cost. Finally, using simulations we
show that, on average, the fraction of customers making
AR decreases as more information is provided to the
customers. More specifically, the fully observable
policy yields the lowest number of reservations. In
semi-observable policies, the fraction of customers
making advance reservation increases as the threshold
is lowered, and the best performance is achieved when
no information at all is provided. Proofs of the
results and more details about the simulations can be
found in the working paper. There are still many
open questions remaining about the impact of sharing
information on customers' behavior in advance
reservation services. Possible directions for further
research include systems where customers have
incomplete knowledge of statistics, or systems where
the provider shares imprecise information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ceppi:2015:PPS,
author = "Sofia Ceppi and Ian Kash",
title = "Personalized Payments for Storage-as-a-Service",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "83--86",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847249",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current storage offerings provide a small number of
options in the form of fixed prices with volume
discounting. This leaves storage operators to guess how
much data customers will add over time. Instead, we
propose that the operator elicits basic information
about future usage. Such information can be used to
operate the system more efficiently. In particular, we
show how prices can be calculated that encourage
customers to accurately report the range of their
future usage while ensuring that the operator covers
his costs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Benjaafar:2015:MAC,
author = "Saif Benjaafar and Guangwen Kong and Xiang Li",
title = "Modeling and Analysis of Collaborative Consumption in
Peer-to-Peer Car Sharing",
journal = j-SIGMETRICS,
volume = "43",
number = "3",
pages = "87--90",
month = dec,
year = "2015",
CODEN = "????",
DOI = "https://doi.org/10.1145/2847220.2847250",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Dec 11 08:25:00 MST 2015",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnamurthy:2016:PCC,
author = "Diwakar Krishnamurthy and Anne Koziolek",
title = "Performance Challenges, Current Bad Practices, and
Hints in {PaaS} Cloud Application Design",
journal = j-SIGMETRICS,
volume = "43",
number = "4",
pages = "3--12",
month = mar,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2897356.2897358",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Feb 25 17:05:32 MST 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cloud computing is becoming a popular approach to
software application operation, utilizing on-demand
network access to a pool of shared computing resources,
and associated with many benefits including low-effort
provisioning, rapid elasticity, maintenance cost
reduction, and a pay-as-you-go billing model. However,
application deployment in the cloud is not itself a
guarantee of high performance, scalability, and related
quality attributes, which may come as a surprise to
many software engineers who discount the importance of
properly designing a cloud application, expecting that
the cloud itself is the solution. In this paper we
analyze the issues and challenges associated with the
design of a cloud application that has to be in
compliance with given performance criteria, such as the
throughput and response time. We also analyze the
concerns related to other relevant quality criteria,
including scalability, elasticity and availability. To
support our findings, we demonstrate the identified
performance effects of the examined design decisions on
two case studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Heinrich:2016:ART,
author = "Robert Heinrich",
title = "Architectural Run-time Models for Performance and
Privacy Analysis in Dynamic Cloud Applications?",
journal = j-SIGMETRICS,
volume = "43",
number = "4",
pages = "13--22",
month = mar,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2897356.2897359",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Feb 25 17:05:32 MST 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Building software systems by composing third-party
cloud services promises many benefits such as
flexibility and scalability. Yet at the same time, it
leads to major challenges like limited control of third
party infrastructures and runtime changes which mostly
cannot be foreseen during development. While previous
research focused on automated adaptation, the increased
complexity and heterogeneity of cloud services, as well
as their limited observability, make it evident that we
need to allow operators (humans) to engage in the
adaptation process. Models are useful for involving
humans and conducting analysis, e.g. for performance
and privacy. During operation, the system often drifts
away from its design-time models. Run-time models are
kept in sync with the underlying system. However,
typical run-time models are close to an implementation
level of abstraction which impedes understandability
for humans. In this vision paper, we present the
iObserve approach to target aforementioned challenges
while considering operation-level adaptation and
development-level evolution as two mutual interwoven
processes. Central to this perception is an
architectural run-time model that is usable for
automated adaptation and is simultaneously
comprehensible for humans during evolution. The
run-time model builds upon a technology-independent
monitoring approach. A correspondence model maintains
the semantic relationships between monitoring outcomes
and architecture models. As an umbrella a megamodel
integrates design-time models, code generation,
monitoring, and run-time model update. Currently,
iObserve covers the monitoring and analysis phases of
the MAPE control loop. We come up with a roadmap to
include planning and execution activities in
iObserve.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2016:DTD,
author = "Zheng Li and Liam O'Brien and Maria Kihl",
title = "{DoKnowMe}: Towards a Domain Knowledge-driven
Methodology for Performance Evaluation",
journal = j-SIGMETRICS,
volume = "43",
number = "4",
pages = "23--32",
month = mar,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2897356.2897360",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Feb 25 17:05:32 MST 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Software engineering considers performance evaluation
to be one of the key portions of software quality
assurance. Unfortunately, there seems to be a lack of
standard methodologies for performance evaluation even
in the scope of experimental computer science. Inspired
by the concept of ``instantiation'' in object-oriented
programming, we distinguish the generic performance
evaluation logic from the distributed and ad-hoc
relevant studies, and develop an abstract evaluation
methodology (by analogy with ``class'') we name Domain
Knowledge-driven Methodology (DoKnowMe). By replacing
five predefined domain-specific knowledge artefacts,
DoKnowMe could be instantiated into specific
methodologies (by analogy with ``object'') to guide
evaluators in performance evaluation of different
software and even computing systems. We also propose a
generic validation framework with four indicators (i.e.
usefulness, feasibility, effectiveness and
repeatability), and use it to validate DoKnowMe in the
Cloud services evaluation domain. Given the positive
and promising validation result, we plan to integrate
more common evaluation strategies to improve DoKnowMe
and further focus on the performance evaluation of
Cloud autoscaler systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grottke:2016:ESC,
author = "Michael Grottke and Alberto Avritzer and Daniel S.
Menasch{\'e} and Leandro P. de Aguiar and Eitan
Altman",
title = "On the Efficiency of Sampling and Countermeasures to
Critical-Infrastructure-Targeted Malware Campaigns",
journal = j-SIGMETRICS,
volume = "43",
number = "4",
pages = "33--42",
month = mar,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2897356.2897361",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Feb 25 17:05:32 MST 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ensuring system survivability in the wake of advanced
persistent threats is a big challenge that the security
community is facing to ensure critical infrastructure
protection. In this paper, we define metrics and models
for the assessment of coordinated massive malware
campaigns targeting critical infrastructure sectors.
First, we develop an analytical model that allows us to
capture the effect of neighborhood on different metrics
(e.g., infection probability and contagion
probability). Then, we assess the impact of putting
operational but possibly infected nodes into
quarantine. Finally, we study the implications of
scanning nodes for early detection of malware (e.g.,
worms), accounting for false positives and false
negatives. Evaluating our methodology using a
hierarchical topology typical of factory automation
networks, we find that malware infections can be
effectively contained by using quarantine and
appropriate rates of scanning for soft impacts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rehmann:2016:PMS,
author = "Kim-Thomas Rehmann and Changyun Seo and Dongwon Hwang
and Binh Than Truong and Alexander Boehm and Dong Hun
Lee",
title = "Performance Monitoring in {SAP HANA}'s Continuous
Integration Process",
journal = j-SIGMETRICS,
volume = "43",
number = "4",
pages = "43--52",
month = mar,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2897356.2897362",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Feb 25 17:05:32 MST 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Development principles such as continuous integration
and continuous delivery become increasingly popular in
the software industry. They allow for the quick and
automated build, test, and delivery of software,
thereby significantly improving the overall quality
assurance and release processes. In this paper, we show
how to apply the ideas of continuous delivery to
complex system software, as exemplified by the SAP HANA
database platform. We discuss the integration of
performance testing early in the delivery process and
the construction of services to detect and report
performance anomalies in a continuous integration
process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nambiar:2016:MDS,
author = "Manoj Nambiar and Ajay Kattepur and Gopal Bhaskaran
and Rekha Singhal and Subhasri Duttagupta",
title = "Model Driven Software Performance Engineering: Current
Challenges and Way Ahead",
journal = j-SIGMETRICS,
volume = "43",
number = "4",
pages = "53--62",
month = mar,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2897356.2897363",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Feb 25 17:05:32 MST 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance model solvers and simulation engines have
been around for more than two decades. Yet, performance
modeling has not received wide acceptance in the
software industry, unlike the pervasive use of modeling and
simulation tools in other industries. This paper
explores underlying causes and looks at challenges that
need to be overcome to increase the utility of
performance modeling for making critical decisions on
software-based products and services. Multiple
real-world case studies and examples are included to
highlight our viewpoints on performance engineering.
Finally, we conclude with some possible directions the
performance modeling community could take, for better
predictive capabilities required for industrial use.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2016:DMM,
author = "David Gamarnik and John N. Tsitsiklis and Martin
Zubeldia",
title = "Delay, Memory, and Messaging Tradeoffs in Distributed
Service Systems",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "1--12",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901478",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the following distributed service model:
jobs with unit mean, exponentially distributed, and
independent processing times arrive as a Poisson
process of rate $ \lambda N $, with $ 0 < \lambda < 1
$, and are immediately dispatched to one of several
queues associated with $N$ identical servers with unit
processing rate. We assume that the dispatching
decisions are made by a central dispatcher endowed with
a finite memory, and with the ability to exchange
messages with the servers. We study the fundamental
resource requirements (memory bits and message exchange
rate), in order to drive the expected steady-state
queueing delay of a typical job to zero, as $N$
increases. We propose a certain policy and establish
(using a fluid limit approach) that it drives the delay
to zero when either (i) the message rate grows
superlinearly with $N$, or (ii) the memory grows
superlogarithmically with $N$. Moreover, we show that any
policy that has a certain symmetry property, and for
which neither condition (i) nor (ii) holds, results in
an expected queueing delay which is bounded away from
zero. Finally, using the fluid limit approach once
more, we show that for any given $ \alpha > 0$ (no
matter how small), if the policy only uses a linear
message rate $ \alpha N$, the resulting asymptotic (as
$ N \to \infty $) expected queueing delay is positive
but upper bounded, uniformly over all $\lambda < 1$.
This is a significant improvement over the popular
``power-of-d-choices'' policy, which has a limiting
expected delay that grows as $\log (1 / (1 - \lambda))$
as $\lambda \uparrow 1$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
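
The ``power-of-d-choices'' benchmark mentioned in the abstract
above is simple enough to reproduce directly. The sketch below is
an illustrative Python simulation, not code from the paper: each
Poisson arrival joins the shortest of d uniformly sampled FCFS
queues with exponential unit-rate service, and the mean sojourn
time is reported. The parameters (N, lam, d, job count) are
arbitrary choices.

import heapq
import random
from collections import deque

def power_of_d_sim(N=100, lam=0.9, d=2, num_jobs=100_000, seed=1):
    """Join-the-shortest-of-d-sampled-queues dispatching with
    Poisson(lam*N) arrivals and exp(1) service; returns the mean
    sojourn time (waiting plus service) over the simulated jobs."""
    random.seed(seed)
    queues = [deque() for _ in range(N)]   # per-server FIFO of arrival times
    departures = []                        # heap of (departure time, server)
    next_arrival = random.expovariate(lam * N)
    total_sojourn, completed = 0.0, 0
    while completed < num_jobs:
        if not departures or next_arrival <= departures[0][0]:
            t = next_arrival
            # Sample d servers and join the one with the fewest jobs.
            s = min(random.sample(range(N), d), key=lambda i: len(queues[i]))
            queues[s].append(t)
            if len(queues[s]) == 1:        # server was idle: start service
                heapq.heappush(departures, (t + random.expovariate(1.0), s))
            next_arrival = t + random.expovariate(lam * N)
        else:
            t, s = heapq.heappop(departures)
            total_sojourn += t - queues[s].popleft()
            completed += 1
            if queues[s]:                  # start the next waiting job
                heapq.heappush(departures, (t + random.expovariate(1.0), s))
    return total_sojourn / completed

if __name__ == "__main__":
    for d in (1, 2):
        print("d =", d, "mean sojourn time:", round(power_of_d_sim(d=d), 2))
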
@Article{Maguluri:2016:OHT,
author = "Siva Theja Maguluri and Sai Kiran Burle and R.
Srikant",
title = "Optimal Heavy-Traffic Queue Length Scaling in an
Incompletely Saturated Switch",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "13--24",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901466",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an input queued switch operating under the
MaxWeight scheduling algorithm. This system is
interesting to study because it is a model for Internet
routers and data center networks. Recently, it was
shown that the MaxWeight algorithm has optimal
heavy-traffic queue length scaling when all ports are
uniformly saturated. Here we consider the case where a
fraction of the ports are saturated and others are not
(which we call the incompletely saturated case), and
also the case where the rates at which the ports are
saturated can be different. We use a recently developed
drift technique to show that the heavy-traffic queue
length under the MaxWeight scheduling algorithm has
optimal scaling with respect to the switch size even in
these cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zeng:2016:NSC,
author = "Yun Zeng and Augustin Chaintreau and Don Towsley and
Cathy H. Xia",
title = "A Necessary and Sufficient Condition for Throughput
Scalability of Fork and Join Networks with Blocking",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "25--36",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901470",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to emerging applications such as cloud computing
and big data analytics, modern information processing
systems are growing increasingly large and complex. A
critical issue concerns the throughput performance as
the system grows in size. This paper models distributed
information processing systems as fork and join
queueing networks with blocking. We identify necessary
and sufficient conditions for throughput scalability of
such fork and join networks as they grow in size.
Previous studies have either focused on special
structured networks such as tandem or tree networks, or
provided only necessary conditions for throughput
scalability. In this paper, we show that such necessary
conditions are not sufficient. We present a key
topological concept called ``minimum level'' of the
underlying graph, and develop lower and upper bounds
for the throughput of arbitrary FJQN/Bs. The bounds
depend on network degree, minimum level, deterministic
cycle time, buffer sizes, and service time
distributions, but not on network size. We show that
level-boundedness and degree-boundedness are necessary
and sufficient conditions to guarantee that the
throughput of an FJQN/B is bounded away from zero as
network size goes to infinity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Garetto:2016:GTB,
author = "Michele Garetto and Emilio Leonardi and Giovanni Luca
Torrisi",
title = "Generalized Threshold-Based Epidemics in Random
Graphs: The Power of Extreme Values",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "37--50",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901455",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Bootstrap percolation is a well-known activation
process in a graph, in which a node becomes active when
it has at least r active neighbors. Such a process,
originally studied on regular structures, has recently
also been investigated in the context of random
graphs, where it can serve as a simple model for a wide
variety of cascades, such as the spreading of ideas,
trends, viral contents, etc. over large social
networks. In particular, it has been shown that in
G(n,p) the final active set can exhibit a phase
transition for a sub-linear number of seeds. In this
paper, we propose a unified framework to study similar
sub-linear phase transitions for a much broader class
of graph models and epidemic processes. Specifically,
we consider (i) a generalized version of bootstrap
percolation in G(n,p) with random activation thresholds
and random node-to-node influences; (ii) different
random graph models, including graphs with given degree
sequence and graphs with community structure (block
model). The common thread of our work is to show the
surprising sensitivity of the critical seed set size to
extreme values of distributions, which makes some
systems dramatically vulnerable to large-scale
outbreaks. We validate our results by running
simulations on both synthetic and real graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
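
For orientation, the classical bootstrap percolation process that
the abstract above generalizes can be simulated in a few lines.
The sketch below illustrates only the baseline process (a node
activates once at least r of its neighbors are active); the graph
size, edge probability, seed-set size and threshold are arbitrary
assumptions, and the paper's random thresholds and influences are
not modeled.

import random
from collections import deque

def gnp(n, p, seed=0):
    """Adjacency lists of an Erdos-Renyi G(n, p) sample."""
    rng = random.Random(seed)
    adj = [[] for _ in range(n)]
    for i in range(n):
        for j in range(i + 1, n):
            if rng.random() < p:
                adj[i].append(j)
                adj[j].append(i)
    return adj

def bootstrap_percolation(adj, seeds, r=2):
    """Classical bootstrap percolation: a node becomes active once
    at least r of its neighbors are active.  Returns the final
    active set."""
    active = set(seeds)
    active_neighbors = [0] * len(adj)
    frontier = deque(seeds)
    while frontier:
        u = frontier.popleft()
        for v in adj[u]:
            if v in active:
                continue
            active_neighbors[v] += 1
            if active_neighbors[v] >= r:
                active.add(v)
                frontier.append(v)
    return active

if __name__ == "__main__":
    adj = gnp(2000, 0.01)
    seeds = random.Random(1).sample(range(2000), 50)
    print("final active set size:", len(bootstrap_percolation(adj, seeds, r=2)))
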
@Article{Buchnik:2016:RRG,
author = "Eliav Buchnik and Edith Cohen",
title = "Reverse Ranking by Graph Structure: Model and Scalable
Algorithms",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "51--62",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901458",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distances in a network capture relations between nodes
and are the basis of centrality, similarity, and
influence measures. Often, however, the relevance of a
node u to a node v is more precisely measured not by
the magnitude of the distance, but by the number of
nodes that are closer to v than u. That is, by the rank
of u in an ordering of nodes by increasing distance
from v. We identify and address fundamental challenges
in rank-based graph mining. We first consider
single-source computation of reverse-ranks and design a
``Dijkstra-like'' algorithm which computes nodes in
order of increasing approximate reverse rank while only
traversing edges adjacent to returned nodes. We then
define reverse-rank influence, which naturally extends
reverse nearest neighbors influence [Korn and
Muthukrishnan 2000] and builds on a well studied
distance-based influence. We present near-linear
algorithms for greedy approximate reverse-rank
influence maximization. The design relies on our
single-source algorithm. Our algorithms utilize
near-linear preprocessing of the network to compute
all-distance sketches. As a contribution of independent
interest, we present a novel algorithm for computing
these sketches, which have many other applications, on
multi-core architectures. We complement our algorithms
by establishing the hardness of computing exact
reverse-ranks for a single source and exact
reverse-rank influence. This implies that when using
near-linear algorithms, the small relative errors we
obtain are the best we can currently hope for. Finally,
we conduct an experimental evaluation on graphs with
tens of millions of edges, demonstrating both
scalability and accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
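
The reverse rank defined in the abstract above has a direct
brute-force reading: run a single-source shortest-path computation
from v and count how many nodes lie strictly closer to v than u
does. The sketch below only illustrates that quantity; the paper's
contribution is the scalable sketch-based approximation, and the
strict-inequality tie-breaking convention used here is an
assumption.

import heapq

def dijkstra(adj, src):
    """Shortest-path distances from src in a graph given as a dict
    mapping node -> list of (neighbor, weight) pairs."""
    dist = {src: 0.0}
    heap = [(0.0, src)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:
            continue
        for v, w in adj.get(u, []):
            nd = d + w
            if nd < dist.get(v, float("inf")):
                dist[v] = nd
                heapq.heappush(heap, (nd, v))
    return dist

def reverse_rank(adj, u, v):
    """Rank of u in the ordering of nodes by increasing distance
    from v, i.e. the number of nodes strictly closer to v than u;
    nodes unreachable from v are ignored."""
    dist = dijkstra(adj, v)
    if u not in dist:
        return float("inf")
    return sum(1 for d in dist.values() if d < dist[u])

if __name__ == "__main__":
    # Tiny weighted graph: a path a-b-c-d plus a long shortcut a-d.
    adj = {
        "a": [("b", 1.0), ("d", 3.5)],
        "b": [("a", 1.0), ("c", 1.0)],
        "c": [("b", 1.0), ("d", 1.0)],
        "d": [("c", 1.0), ("a", 3.5)],
    }
    print(reverse_rank(adj, "a", "d"))   # 3 nodes are closer to d than a is
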
@Article{Cullina:2016:IAC,
author = "Daniel Cullina and Negar Kiyavash",
title = "Improved Achievability and Converse Bounds for
{Erd{\H{o}}s--R{\'e}nyi} Graph Matching",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "63--72",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901460",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of perfectly recovering the
vertex correspondence between two correlated
Erd{\H{o}}s--R{\'e}nyi (ER) graphs. For a pair of
correlated graphs on the same vertex set, the
correspondence between the vertices can be obscured by
randomly permuting the vertex labels of one of the
graphs. In some cases, the structural information in
the graphs allow this correspondence to be recovered.
We investigate the information-theoretic threshold for
exact recovery, i.e. the conditions under which the
entire vertex correspondence can be correctly recovered
given unbounded computational resources. Pedarsani and
Grossglauser provided an achievability result of this
type. Their result establishes the scaling dependence
of the threshold on the number of vertices. We improve
on their achievability bound. We also provide a
converse bound, establishing conditions under which
exact recovery is impossible. Together, these establish
the scaling dependence of the threshold on the level of
correlation between the two graphs. The converse and
achievability bounds differ by a factor of two for
sparse, significantly correlated graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2016:BMT,
author = "Mor Harchol-Balter",
title = "A Better Model for Task Assignment in Server Farms:
How Replication can Help",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "73--73",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An age-old problem in the design of server farms is
the choice of the task assignment policy. This is the
algorithm that determines how to assign incoming jobs
to servers. Popular policies include Round-Robin
assignment, Join-the-Shortest-Queue,
Join-Queue-with-Least-Work, and so on. While much
research has studied assignment policies, little has
taken into account server-side variability --- the fact
that the server we choose might be temporarily and
unpredictably slow. We show that when server-side
variability dominates runtime, replication of jobs can
be very beneficial. We introduce the Replication-d
algorithm that replicates each arrival to d servers
chosen at random, where the job is considered ``done''
as soon as the first replica completes. We provide an
exact closed-form analysis of Replication-d. We next
introduce a much more general model, one which takes
both the inherent job size distribution and the
server-side variability into account. This is a
departure from traditional queueing models which only
allow for one ``size'' distribution. We propose and
analyze a new task assignment policy,
Replicate-Idle-Queue (RIQ), which is designed to
perform well given these dual sources of variability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
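
The intuition behind Replication-d described in the abstract above
(each job is sent to d random servers and finishes when its first
replica does) can be seen with a deliberately simplified Monte
Carlo that ignores queueing and models only server-side slowdown.
The slowdown distribution below is an illustrative assumption, not
the paper's model, and the sketch is not its exact closed-form
queueing analysis.

import random

def mean_replicated_completion(d=2, num_jobs=100_000, seed=0):
    """Each job has an inherent size ~ exp(1); every replica of it
    experiences an independent multiplicative server-side slowdown,
    and the job completes when its fastest replica does (queueing
    is ignored in this sketch)."""
    rng = random.Random(seed)

    def slowdown():
        # Illustrative variability: servers are usually fine but are
        # temporarily 10x slower one tenth of the time.
        return 10.0 if rng.random() < 0.1 else 1.0

    total = 0.0
    for _ in range(num_jobs):
        size = rng.expovariate(1.0)
        total += min(size * slowdown() for _ in range(d))
    return total / num_jobs

if __name__ == "__main__":
    for d in (1, 2, 3):
        print("d =", d, "mean completion time:",
              round(mean_replicated_completion(d=d), 3))
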
@Article{Venkatakrishnan:2016:CCS,
author = "Shaileshh Bojja Venkatakrishnan and Mohammad Alizadeh
and Pramod Viswanath",
title = "Costly Circuits, Submodular Schedules and Approximate
{Carath{\'e}odory} Theorems",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "75--88",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901479",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Hybrid switching --- in which a high bandwidth circuit
switch (optical or wireless) is used in conjunction
with a low bandwidth packet switch --- is a promising
alternative to interconnect servers in today's large
scale data centers. Circuit switches offer a very high
link rate, but incur a non-trivial reconfiguration
delay which makes their scheduling challenging. In this
paper, we demonstrate a lightweight, simple and
nearly optimal scheduling algorithm that trades off
reconfiguration costs with the benefits of
reconfiguration that match the traffic demands. Seen
alternatively, the algorithm provides a fast and
approximate solution towards a constructive version of
Carath{\'e}odory's Theorem for the Birkhoff polytope.
The algorithm also has strong connections to submodular
optimization, achieves a performance at least half that
of the optimal schedule and strictly outperforms state
of the art in a variety of traffic demand settings.
These ideas naturally generalize: we see that indirect
routing leads to exponential connectivity; this is
another phenomenon of the power of multi-hop routing,
distinct from the well-known load balancing effects.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Narayanan:2016:RLT,
author = "Shankaranarayanan Puzhavakath Narayanan and Yun Seong
Nam and Ashiwan Sivakumar and Balakrishnan
Chandrasekaran and Bruce Maggs and Sanjay Rao",
title = "Reducing Latency Through Page-aware Management of
{Web} Objects by Content Delivery Networks",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "89--100",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901472",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As popular web sites turn to content delivery networks
(CDNs) for full-site delivery, there is an opportunity
to improve the end-user experience by optimizing the
delivery of entire web pages, rather than just
individual objects. In particular, this paper explores
page-structure-aware strategies for placing objects in
CDN cache hierarchies. The key idea is that the objects
in a web page that have the largest impact on page
latency should be served out of the closest or fastest
caches in the hierarchy. We present schemes for
identifying these objects and develop mechanisms to
ensure that they are served with higher priority by the
CDN, while balancing traditional CDN concerns such as
optimizing the delivery of popular objects and
minimizing bandwidth costs. To establish a baseline for
evaluating improvements in page latencies, we collect
and analyze publicly visible HTTP headers that reveal
the distribution of objects among the various levels of
a major CDN's cache hierarchy. Through extensive
experiments on 83 real-world web pages, we show that
latency reductions of over 100 ms can be obtained for
30\% of the popular pages, with even larger reductions
for the less popular pages. Using anonymized server
logs provided by the CDN, we show the feasibility of
reducing capacity and staleness misses of critical
objects by 60\% with minimal increase in overall miss
rates, and bandwidth overheads of under 0.02\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferragut:2016:OTC,
author = "Andr{\'e}s Ferragut and Ismael Rodriguez and Fernando
Paganini",
title = "Optimizing {TTL} Caches under Heavy-Tailed Demands",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "101--112",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901459",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we analyze the hit performance of cache
systems that receive file requests with general arrival
distributions and different popularities. We consider
timer-based (TTL) policies, with differentiated timers
over which we optimize. The optimal policy is shown to
be related to the monotonicity of the hazard rate
function of the inter-arrival distribution. In
particular for decreasing hazard rates, timer policies
outperform the static policy of caching the most
popular contents. We provide explicit solutions for the
optimal policy in the case of Pareto-distributed
inter-request times and a Zipf distribution of file
popularities, including a compact fluid
characterization in the limit of a large number of
files. We compare it through simulation with classical
policies, such as least-recently-used, and discuss its
performance. Finally, we analyze extensions of the
optimization framework to a line network of caches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
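
A minimal way to see the timer-based (TTL) policy from the
abstract above at work: if the item's timer T is reset on every
request (one common TTL convention, stated here as an assumption),
a request is a hit exactly when the gap since the previous request
is below T, so with renewal Pareto inter-request times the hit
probability can be estimated by direct sampling. The Pareto
parameters and timer values below are illustrative.

import random

def ttl_hit_probability(T, alpha=1.5, xm=0.5, num_requests=200_000, seed=0):
    """Hit probability of one item under a reset-on-request TTL
    policy: a request hits iff the inter-request gap is below the
    timer T.  Gaps are i.i.d. Pareto(alpha) with scale xm."""
    rng = random.Random(seed)
    hits = 0
    for _ in range(num_requests):
        gap = xm * (1.0 - rng.random()) ** (-1.0 / alpha)  # Pareto sample
        if gap < T:
            hits += 1
    return hits / num_requests

if __name__ == "__main__":
    for T in (0.5, 1.0, 2.0, 4.0):
        print("T =", T, "hit probability:", round(ttl_hit_probability(T), 3))
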
@Article{Ioannidis:2016:ACN,
author = "Stratis Ioannidis and Edmund Yeh",
title = "Adaptive Caching Networks with Optimality Guarantees",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "113--124",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901467",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the problem of optimal content placement over
a network of caches, a problem naturally arising in
several networking applications, including ICNs, CDNs,
and P2P systems. Given a demand of content request
rates and paths followed, we wish to determine the
content placement that maximizes the expected caching
gain, i.e., the reduction of routing costs due to
intermediate caching. The offline version of this
problem is NP-hard and, in general, the demand and
topology may be a priori unknown. Hence, a distributed,
adaptive, constant approximation content placement
algorithm is desired. We show that path replication, a
simple algorithm frequently encountered in the literature,
can be arbitrarily suboptimal when combined with
traditional eviction policies, like LRU, LFU, or FIFO.
We propose a distributed, adaptive algorithm that
performs stochastic gradient ascent on a concave
relaxation of the expected caching gain, and constructs
a probabilistic content placement within 1-1/e factor
from the optimal, in expectation. Motivated by our
analysis, we also propose a novel greedy eviction
policy to be used with path replication, and show
through numerical evaluations that both algorithms
significantly outperform path replication with
traditional eviction policies over a broad array of
network topologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jacquet:2016:BMT,
author = "Philippe Jacquet",
title = "Breathing Mankind Thoughts",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "125--125",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901899",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mankind has never been connected as it is now and as
it will be tomorrow. Nowadays thanks to the rise of
social networks such as Tweeter and Facebook, we can
follow in real time the thought of millions of people.
In fact we can almost feel the thoughts of a whole
humanity and maybe project ourselves in a position
where we could predict the major trends in the
collective behavior of this humanity. However such an
ambitious aim would require considerable resources in
processing and networking which may be far from
affordable. Indeed trends and topics are carried in a
multiple of small texts written in various language and
vocabularies like an hologram carries information in a
dispersed way. Their capture and classification pose
serious problems of data mining and analytics.
Processes based on pure semantic analysis would require
too much processing power and memory. We will present
alternative methods based on string complexity also
inspired on geolocalization in wireless networks which
saves processing power by several order of magnitude.
The ultimate goal is to detect when people are thinking
about the very same topics before they become aware.
Beyond the problem of topic detection and
classification one must also estimate the potential of
an isolated topic to become a lasting trend. In other
word one must probe the topic foundations, for example
by challenging how trustworthy are its sources.
Designing an efficient source finder algorithm is
indissociable with building realistic models about
topic propagation. If we suppose that topics propagate
inside communities via the followers-followees links,
the propagation is highly amplified by the unbalances
in the graph topology. It is established that
dominating and semi dominating nodes such as the CNN
Tweeter site are the main accelerator of topic
propagation. The difficulty is to find the actual
source of a topic beyond those screening nodes and the
search is prone to false positive and true negative
effects. In fact we will show that finding a source of
topic is similar to finding a common ancestor in a
Darwin channel where spurious mutations complicate the
task.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shamsi:2016:UCU,
author = "Zain Shamsi and Dmitri Loguinov",
title = "Unsupervised Clustering Under Temporal Feature
Volatility in Network Stack Fingerprinting",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "127--138",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901449",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Maintaining and updating signature databases is a
tedious task that normally requires a large amount of
user effort. The problem becomes harder when features
can be distorted by observation noise, which we call
volatility. To address this issue, we propose
algorithms and models to automatically generate
signatures in the presence of noise, with a focus on
stack fingerprinting, which is a research area that
aims to discover the operating system (OS) of remote
hosts using TCP/IP packets. Armed with this framework,
we construct a database with 420 network stacks, label
the signatures, develop a robust classifier for this
database, and fingerprint 66M visible webservers on the
Internet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dai:2016:NBF,
author = "Haipeng Dai and Yuankun Zhong and Alex X. Liu and Wei
Wang and Meng Li",
title = "Noisy {Bloom} Filters for Multi-Set Membership
Testing",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "139--151",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901451",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is on designing a compact data structure
for multi-set membership testing allowing fast set
querying. Multi-set membership testing is a fundamental
operation for computing systems and networking
applications. Most existing schemes for multi-set
membership testing are built upon Bloom filter, and
fall short in either storage space cost or query speed.
To address this issue, in this paper we propose Noisy
Bloom Filter (NBF) and Error Corrected Noisy Bloom
Filter (NBF-E) for multi-set membership testing. For
theoretical analysis, we optimize their classification
failure rate and false positive rate, and present
criteria for selection between NBF and NBF-E. The key
novelty of NBF and NBF-E is to store set ID information
in a compact but noisy way that allows fast recording
and querying, and use denoising method for querying.
Especially, NBF-E incorporates asymmetric
error-correcting coding technique into NBF to enhance
the resilience of query results to noise by revealing
and leveraging the asymmetric error nature of query
results. To evaluate NBF and NBF-E in comparison with
prior art, we conducted experiments using real-world
network traces. The results show that NBF and NBF-E
significantly advance the state-of-the-art on multi-set
membership testing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
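
NBF and NBF-E as described above build on the standard Bloom
filter; for readers who want the baseline in code, a minimal
standard Bloom filter is sketched below. The array size, hash
construction and flow-key examples are illustrative assumptions,
and the paper's noisy multi-set encoding and its error-corrected
denoising queries are not reproduced here.

import hashlib

class BloomFilter:
    """Minimal standard Bloom filter: k hashed positions per key in
    an m-bit array; lookups may return false positives but never
    false negatives."""

    def __init__(self, m=1 << 16, k=4):
        self.m, self.k = m, k
        self.bits = bytearray(m // 8 + 1)

    def _positions(self, key):
        # Derive k positions by hashing the key with k different salts.
        for i in range(self.k):
            digest = hashlib.sha256(f"{i}:{key}".encode()).digest()
            yield int.from_bytes(digest[:8], "big") % self.m

    def add(self, key):
        for pos in self._positions(key):
            self.bits[pos // 8] |= 1 << (pos % 8)

    def __contains__(self, key):
        return all(self.bits[pos // 8] & (1 << (pos % 8))
                   for pos in self._positions(key))

if __name__ == "__main__":
    bf = BloomFilter()
    for flow in ("10.0.0.1:80", "10.0.0.2:443"):
        bf.add(flow)
    print("10.0.0.1:80" in bf, "192.168.0.9:22" in bf)
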
@Article{Fanti:2016:RSO,
author = "Giulia Fanti and Peter Kairouz and Sewoong Oh and
Kannan Ramchandran and Pramod Viswanath",
title = "Rumor Source Obfuscation on Irregular Trees",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "153--164",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901471",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Anonymous messaging applications have recently gained
popularity as a means for sharing opinions without fear
of judgment or repercussion. Messages in these
applications propagate anonymously (without authorship
metadata) over a network that is typically defined by
social connections or physical proximity. However,
recent advances in rumor source detection show that the
source of such an anonymous message can be inferred by
statistical inference attacks. Adaptive diffusion was
recently proposed as a solution that achieves optimal
source obfuscation over regular trees. However, in real
social networks, node degrees differ from node to node,
and adaptive diffusion can be significantly
sub-optimal. This gap increases as the degrees become
more irregular. In order to quantify this gap, we model
the underlying network as coming from standard
branching processes with i.i.d. degree distributions.
Building upon the analysis techniques from branching
processes, we give an analytical characterization of
the dependence between the probability of detection
achieved by adaptive diffusion and the degree
distribution. Further, this analysis provides a key
insight: passing a rumor to a friend who has many
friends makes the source more ambiguous. This leads to
a new family of protocols that we call Preferential
Attachment Adaptive Diffusion (PAAD). When messages are
propagated according to PAAD, we give both the MAP
estimator for finding the source and also an analysis
of the probability of detection achieved by this
adversary. The analytical results are not directly
comparable, since the adversary's observed information
has a different distribution under adaptive diffusion
than under PAAD. Instead, we present results from
numerical experiments that suggest that PAAD achieves a
lower probability of detection, at the cost of
increased communication for coordination.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Avrachenkov:2016:IOL,
author = "Konstantin Avrachenkov and Bruno Ribeiro and Jithin K.
Sreedharan",
title = "Inference in {OSNs} via Lightweight Partial Crawls",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "165--177",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901477",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Are Online Social Network (OSN) A users more likely to
form friendships with those with similar attributes? Do
users at an OSN B score content more favorably than OSN
C users? Such questions frequently arise in the context
of Social Network Analysis (SNA) but often crawling an
OSN network via its Application Programming Interface
(API) is the only way to gather data from a third
party. To date, these partial API crawls account for
the majority of public datasets and are synonymous with
a lack of statistical guarantees in incomplete-data
comparisons,
severely limiting SNA research progress. Using
regenerative properties of the random walks, we propose
estimation techniques based on short crawls that have
proven statistical guarantees. Moreover, our short
crawls can be implemented in massively distributed
algorithms. We also provide an adaptive crawler that
makes our method parameter-free, significantly
improving our statistical guarantees. We then derive
the Bayesian approximation of the posterior of the
estimates, and in addition, obtain an estimator for the
expected value of node and edge statistics in an
equivalent configuration model or Chung-Lu random graph
model of the given network (where nodes are connected
randomly) and use it as a basis for testing null
hypotheses. The theoretical results are supported with
simulations on a variety of real-world networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gabielkov:2016:SCW,
author = "Maksym Gabielkov and Arthi Ramachandran and Augustin
Chaintreau and Arnaud Legout",
title = "Social Clicks: What and Who Gets Read on {Twitter}?",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "179--192",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901462",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Online news domains increasingly rely on social media
to drive traffic to their websites. Yet we know
surprisingly little about how a social media
conversation mentioning an online article actually
generates clicks. Sharing behaviors, in contrast, have
been fully or partially available and scrutinized over
the years. While this has led to multiple assumptions
on the diffusion of information, each assumption was
designed or validated while ignoring actual clicks. We
present a large scale, unbiased study of social clicks
---that is also the first data of its kind---gathering
a month of web visits to online resources that are
located in 5 leading news domains and that are
mentioned in the third largest social media by web
referral (Twitter). Our dataset amounts to 2.8 million
shares, together responsible for 75 billion potential
views on this social media, and 9.6 million actual
clicks to 59,088 unique resources. We design a
reproducible methodology and carefully correct its
biases. As we prove, properties of clicks impact
multiple aspects of information diffusion, all
previously unknown: (i) Secondary resources, which are
not promoted through headlines and are responsible for
the long tail of content popularity, generate more
clicks both in absolute and relative terms; (ii) Social
media attention is actually long-lived, in contrast
with temporal evolution estimated from shares or
receptions; (iii) The actual influence of an
intermediary or a resource is poorly predicted by their
share count, but we show how that prediction can be
made more precise.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2016:UPO,
author = "Niangjun Chen and Joshua Comden and Zhenhua Liu and
Anshul Gandhi and Adam Wierman",
title = "Using Predictions in Online Optimization: Looking
Forward with an Eye on the Past",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "193--206",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901464",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider online convex optimization (OCO) problems
with switching costs and noisy predictions. While the
design of online algorithms for OCO problems has
received considerable attention, the design of
algorithms in the context of noisy predictions is
largely open. To this point, two promising algorithms
have been proposed: Receding Horizon Control (RHC) and
Averaging Fixed Horizon Control (AFHC). The comparison
of these policies is largely open. AFHC has been shown
to provide better worst-case performance, while RHC
outperforms AFHC in many realistic settings. In this
paper, we introduce a new class of policies, Committed
Horizon Control (CHC), that generalizes both RHC and
AFHC. We provide average-case analysis and
concentration results for CHC policies, yielding the
first analysis of RHC for OCO problems with noisy
predictions. Further, we provide explicit results
characterizing the optimal CHC policy as a function of
properties of the prediction noise, e.g., variance and
correlation structure. Our results provide a
characterization of when AFHC outperforms RHC and vice
versa, as well as when other CHC policies outperform
both RHC and AFHC.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bresler:2016:CFL,
author = "Guy Bresler and Devavrat Shah and Luis Filipe Voloch",
title = "Collaborative Filtering with Low Regret",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "207--220",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901469",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There is much empirical evidence that item-item
collaborative filtering works well in practice.
Motivated to understand this, we provide a framework to
design and analyze various recommendation algorithms.
The setup amounts to online binary matrix completion,
where at each time a random user requests a
recommendation and the algorithm chooses an entry to
reveal in the user's row. The goal is to minimize
regret, or equivalently to maximize the number of +1
entries revealed at any time. We analyze an item-item
collaborative filtering algorithm that can achieve
fundamentally better performance compared to user-user
collaborative filtering. The algorithm achieves good
``cold-start'' performance (appropriately defined) by
quickly making good recommendations to new users about
whom there is little information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
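
The abstract above analyzes item-item collaborative filtering as online
binary matrix completion. The sketch below is a toy, batch item-item
recommender based on cosine similarity between item columns of a binary
ratings matrix; it illustrates only the item-item idea, not the paper's
online algorithm or its regret guarantees, and the matrix and parameters
are made up.

import numpy as np

def item_item_recommend(R, user, k=2):
    """Toy item-item collaborative filtering on a ratings matrix R
    (users x items) with entries +1 (like), -1 (dislike), 0 (unknown).
    Scores each unrated item for `user` by its cosine similarity with the
    items the user liked.  Illustrative heuristic only."""
    norms = np.linalg.norm(R, axis=0) + 1e-12     # avoid division by zero
    sim = np.dot(R.T, R) / np.outer(norms, norms) # item-item cosine similarity
    liked = np.where(R[user] == 1)[0]
    scores = sim[:, liked].sum(axis=1)
    scores[R[user] != 0] = -np.inf                # never re-recommend a rated item
    return np.argsort(scores)[::-1][:k]

if __name__ == "__main__":
    # rows: users, columns: items (toy data)
    R = np.array([[ 1,  1,  0, -1, 0],
                  [ 1,  0,  1, -1, 0],
                  [-1,  0,  1,  1, 1],
                  [ 1,  1,  0,  0, 0]], dtype=float)
    print(item_item_recommend(R, user=3))  # items most similar to those user 3 liked
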
@Article{Liu:2016:ALD,
author = "Jia Liu",
title = "Achieving Low-Delay and Fast-Convergence in Stochastic
Network Optimization: a {Nesterovian} Approach",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "221--234",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901474",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the rapid growth of mobile data demands, there
have been significant interests in stochastic resource
control and optimization for wireless networks.
Although significant advances have been made in
stochastic network optimization theory, to date, most
of the existing approaches are plagued by either slow
convergence or unsatisfactory delay performances. To
address these challenges, in this paper, we develop a
new stochastic network optimization framework inspired
by the Nesterov accelerated gradient method. We show
that our proposed Nesterovian approach offers
utility-optimality, fast-convergence, and significant
delay reduction in stochastic network optimization. Our
contributions in this paper are three-fold: (i) we
propose a Nesterovian joint congestion control and
routing/scheduling framework for both single-hop and
multi-hop wireless networks; (ii) we establish the
utility optimality and queueing stability of the
proposed Nesterovian method, and analytically
characterize its delay reduction and convergence speed;
and (iii) we show that the proposed Nesterovian
approach offers a three-way performance control between
utility-optimality, delay, and convergence.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
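
The abstract above builds on the Nesterov accelerated gradient method. As a
reference point, here is a minimal sketch of that method on a toy smooth
quadratic; it illustrates only the acceleration idea, not the paper's joint
congestion control and routing/scheduling framework.

import numpy as np

def nesterov_agd(grad, x0, L, steps=200):
    """Nesterov's accelerated gradient descent for an L-smooth convex
    objective, in its standard momentum form (toy version)."""
    x = y = np.asarray(x0, dtype=float)
    t = 1.0
    for _ in range(steps):
        x_next = y - grad(y) / L            # gradient step from the look-ahead point
        t_next = (1.0 + np.sqrt(1.0 + 4.0 * t * t)) / 2.0
        y = x_next + ((t - 1.0) / t_next) * (x_next - x)   # momentum extrapolation
        x, t = x_next, t_next
    return x

if __name__ == "__main__":
    # toy quadratic: f(x) = 0.5 * x^T A x with an ill-conditioned diagonal A
    A = np.diag([1.0, 100.0])
    grad = lambda x: A.dot(x)
    print(nesterov_agd(grad, x0=[5.0, 5.0], L=100.0))   # converges toward [0, 0]
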
@Article{Zheng:2016:VCV,
author = "Liang Zheng and Carlee Joe-Wong and Christopher G.
Brinton and Chee Wei Tan and Sangtae Ha and Mung
Chiang",
title = "On the Viability of a Cloud Virtual Service Provider",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "235--248",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901452",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cloud service providers (CSPs) often face highly
dynamic user demands for their resources, which can
make it difficult for them to maintain consistent
quality-of-service. Some CSPs try to stabilize user
demands by offering sustained-use discounts to jobs
that consume more instance-hours per month. These
discounts present an opportunity for users to pool
their usage together into a single ``job.'' In this
paper, we examine the viability of a middleman, the
cloud virtual service provider (CVSP), that rents cloud
resources from a CSP and then resells them to users. We
show that the CVSP's business model is only viable if
the average job runtimes and thresholds for
sustained-use discounts are sufficiently small;
otherwise, the CVSP cannot simultaneously maintain low
job waiting times while qualifying for a sustained-use
discount. We quantify these viability conditions by
modeling the CVSP's job scheduling and then use this
model to derive users' utility-maximizing demands and
the CVSP's profit-maximizing price, as well as the
optimal number of instances that the CVSP should rent
from the CSP. We verify our results on a one-month
trace from Google's production compute cluster, through
which we first validate our assumptions on the job
arrival and runtime distributions, and then show that
the CVSP is viable under these workload traces. Indeed,
the CVSP can earn a positive profit without
significantly impacting the CSP's revenue, indicating
that the CSP and CVSP can coexist in the cloud
market.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2016:VPS,
author = "Weina Wang and Lei Ying and Junshan Zhang",
title = "The Value of Privacy: Strategic Data Subjects,
Incentive Mechanisms and Fundamental Limits",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "249--260",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901461",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the value of data privacy in a game-theoretic
model of trading private data, where a data collector
purchases private data from strategic data subjects
(individuals) through an incentive mechanism. The
private data of each individual represents her
knowledge about an underlying state, which is the
information that the data collector desires to learn.
Different from most of the existing work on
privacy-aware surveys, our model does not assume the
data collector to be trustworthy. Instead, each individual
retains full control of her own data privacy and reports
only a privacy-preserving version of her data. In this
paper, the value of \epsilon units of privacy is
measured by the minimum payment of all nonnegative
payment mechanisms, under which an individual's best
response at a Nash equilibrium is to report the data
with a privacy level of \epsilon. The higher \epsilon
is, the less private the reported data is. We derive
lower and upper bounds on the value of privacy which
are asymptotically tight as the number of data subjects
becomes large. Specifically, the lower bound guarantees
that it is impossible to use a smaller payment to
buy \epsilon units of privacy, and the upper bound is
given by an achievable payment mechanism that we
designed. Based on these fundamental limits, we further
derive lower and upper bounds on the minimum total
payment for the data collector to achieve a given
learning accuracy target, and show that the total
payment of the designed mechanism is at most one
individual's payment away from the minimum.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2016:IDM,
author = "Yuanjie Li and Haotian Deng and Jiayao Li and Chunyi
Peng and Songwu Lu",
title = "Instability in Distributed Mobility Management:
Revisiting Configuration Management in {3G\slash 4G}
Mobile Networks",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "261--272",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901457",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mobility support is critical to offering seamless data
service to mobile devices in 3G/4G cellular networks.
To accommodate policy requests by users and carriers,
the micro-mobility management scheme among cells (i.e.,
handoff) is designed to be configurable. Each cell
and mobile device can configure or even customize its
own handoff procedure. In this paper, we examine the
handoff misconfiguration issues in 3G/4G networks. We
show that they may incur handoff instability in the
form of persistent loops, where the device oscillates
between cells even without radio-link and location
changes. Such instability is mainly triggered by
uncoordinated parameter configurations and inconsistent
decision logic in the handoff procedure. It can
degrade user data performance, incur excessive
signaling overhead, and violate the network's expected
handoff goals. We derive the instability conditions,
and validate them on two major US mobile carrier
networks. We further design a software tool for
automatic loop detection, and run it over operational
networks. We discuss possible fixes to such
uncoordinated configurations among devices and cells.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ludwig:2016:TSN,
author = "Arne Ludwig and Szymon Dudyzc and Matthias Rost and
Stefan Schmid",
title = "Transiently Secure Network Updates",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "273--284",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901476",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer networks have become a critical
infrastructure. Especially in shared environments such
as datacenters it is important that a correct,
consistent and secure network operation is guaranteed
at any time, even during routing policy updates. In
particular, at no point in time should it be possible
for packets to bypass security critical waypoints (such
as a firewall or IDS) or to be forwarded along loops.
This paper studies the problem of how to change routing
policies in a transiently consistent manner.
Transiently consistent network updates have been
proposed as a fast and resource efficient alternative
to per-packet consistent updates. Our main result is a
negative one: we show that there are settings where the
two basic properties waypoint enforcement and
loop-freedom cannot be satisfied simultaneously. Even
worse, we rigorously prove that deciding whether a
waypoint enforcing, loop-free network update schedule
exists is NP-hard. These results hold for both kinds of
loop-freedom used in the literature: strong and relaxed
loop-freedom. This paper also presents optimized, exact
mixed integer programs to compute optimal update
schedules. We report on extensive simulation results
and initiate the discussion of scenarios where multiple
waypoints need to be ensured (also known as service
chains).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ying:2016:AEM,
author = "Lei Ying",
title = "On the Approximation Error of Mean-Field Models",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "285--297",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901463",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mean-field models have been used to study large-scale
and complex stochastic systems, such as large-scale
data centers and dense wireless networks, using simple
deterministic models (dynamical systems). This paper
analyzes the approximation error of mean-field models
for continuous-time Markov chains (CTMC), and focuses
on mean-field models that are represented as
finite-dimensional dynamical systems with a unique
equilibrium point. By applying Stein's method and
perturbation theory, the paper shows that under some
mild conditions, if the mean-field model is globally
asymptotically stable and locally exponentially stable,
the mean square difference between the stationary
distribution of the stochastic system with size M and
the equilibrium point of the corresponding mean-field
system is O(1/M). This paper thus provides a general
theorem for establishing the convergence and the
approximation error (i.e., the rate of convergence)
of a large class of CTMCs to their mean-field limit by
examining mainly the stability of the mean-field
model, which is a deterministic system and is often
easier to analyze than the CTMCs. Two applications of
mean-field models in data center networks are presented
to demonstrate the novelty of our results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
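
A tiny worked example of the O(1/M) scaling described in the abstract above,
under illustrative assumptions: for M independent ON/OFF nodes that switch
ON at rate lambda and OFF at rate mu, the stationary fraction of ON nodes is
Binomial(M, p)/M with p = lambda/(lambda + mu), the mean-field equilibrium is
x* = p, and the mean square difference is exactly p(1-p)/M.

def mean_field_mse(M, lam=1.0, mu=2.0):
    """Exact mean-square error between the stationary fraction of ON nodes
    (Binomial(M, p)/M with p = lam/(lam+mu)) and the mean-field fixed point
    x* = p, for a toy CTMC of M independent ON/OFF nodes.  The error equals
    p*(1-p)/M, i.e. O(1/M), matching the scaling quoted in the abstract."""
    p = lam / (lam + mu)
    return p * (1.0 - p) / M

if __name__ == "__main__":
    for M in (10, 100, 1000, 10000):
        print(M, mean_field_mse(M))        # decreases like 1/M
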
@Article{Jiang:2016:DIC,
author = "Bo Jiang and Daniel R. Figueiredo and Bruno Ribeiro
and Don Towsley",
title = "On the Duration and Intensity of Competitions in
Nonlinear {P{\'o}lya} Urn Processes with Fitness",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "299--310",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901475",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cumulative advantage (CA) refers to the notion that
accumulated resources foster the accumulation of
further resources in competitions, a phenomenon that
has been empirically observed in various contexts. The
oldest and arguably simplest mathematical model that
embodies this general principle is the P{\'o}lya urn
process, which finds applications in a myriad of
problems. The original model captures the dynamics of
competitions between two equally fit agents under
linear CA effects, which can be readily generalized to
incorporate different fitnesses and nonlinear CA
effects. We study two statistics of competitions under
the generalized model, namely duration (i.e., time of
the last tie) and intensity (i.e., number of ties). We
give rigorous mathematical characterizations of the
tail distributions of both duration and intensity under
the various regimes for fitness and nonlinearity, which
reveal very interesting behaviors. For example, fitness
superiority induces much shorter competitions in the
sublinear regime while much longer competitions in the
superlinear regime. Our findings can shed light on the
application of P{\'o}lya urn processes in more general
contexts where fitness and nonlinearity may be
present.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
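
A toy simulation of the duration and intensity statistics defined in the
abstract above, for a two-agent nonlinear Polya urn with fitness; the
fitness values and nonlinearity exponents used here are illustrative
assumptions, not the paper's parameter choices.

import random

def polya_competition(steps=10000, fitness=(1.0, 1.5), gamma=1.0, seed=0):
    """Simulate a two-agent nonlinear Polya urn with fitness: at each step,
    agent i wins the next unit with probability proportional to
    fitness[i] * count[i] ** gamma.  Returns the duration (time of the last
    tie) and the intensity (number of ties).  Illustrative parameters."""
    rng = random.Random(seed)
    counts = [1, 1]
    last_tie, num_ties = 0, 1            # the urn starts tied at time 0
    for t in range(1, steps + 1):
        w0 = fitness[0] * counts[0] ** gamma
        w1 = fitness[1] * counts[1] ** gamma
        i = 0 if rng.random() < w0 / (w0 + w1) else 1
        counts[i] += 1
        if counts[0] == counts[1]:
            last_tie, num_ties = t, num_ties + 1
    return last_tie, num_ties

if __name__ == "__main__":
    # gamma < 1: sublinear CA; gamma > 1: superlinear CA (winner-take-all)
    for gamma in (0.5, 1.0, 1.5):
        print(gamma, polya_competition(gamma=gamma))
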
@Article{Jonckheere:2016:AIL,
author = "Matthieu Jonckheere and Balakrishna J. Prabhu",
title = "Asymptotics of Insensitive Load Balancing and Blocking
Phases",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "311--322",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901454",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Load balancing with various types of load information
has become a key component of modern communication and
information systems. In many systems, characterizing
the blocking probability precisely makes it possible to
establish a performance trade-off between delay and
losses. We address here the problem of giving robust
performance bounds based on the study of the asymptotic
behavior of insensitive load balancing schemes when the
number of servers and the load scale jointly. These schemes
have the desirable property that the stationary
distribution of the resulting stochastic network
depends on the distribution of job sizes only through
its mean. It was shown that they give good estimates of
performance indicators for systems with finite buffers,
generalizing henceforth Erlang's formula whereas
optimal policies are already theoretically and
computationally out of reach for networks of moderate
size. We study a single class of traffic acting on a
symmetric set of processor sharing queues with finite
buffers and we consider the case where the load scales
with the number of servers. We characterize the response
of symmetric systems under those schemes at different
scales and show that three amplitudes of deviations can
be identified according to whether $ \rho < 1 $, $ \rho
= 1 $, and $ \rho > 1 $. A central limit scaling takes
place for a sub-critical load; for $ \rho = 1 $, the
number of free servers scales like $ n^{\theta / (\theta
+ 1)} $ ($ \theta $ being the buffer depth and $n$ being
the number of servers) and is of order 1 for
super-critical loads. This further implies the
existence of different phases for the blocking
probability. Before a (refined) critical load $
\rho_c(n) = 1 - a n^{- \theta / (\theta + 1)}$, the
blocking is exponentially small and becomes of order $
n^{- \theta / (\theta + 1)}$ at $ \rho_c(n)$. This
generalizes the well-known Quality and Efficiency
Driven (QED) regime or Halfin--Whitt regime for a
one-dimensional queue, and leads to a generalized
staffing rule for a given target blocking
probability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2016:ULV,
author = "Kevin K. Chang and Abhijith Kashyap and Hasan Hassan
and Saugata Ghose and Kevin Hsieh and Donghyuk Lee and
Tianshi Li and Gennady Pekhimenko and Samira Khan and
Onur Mutlu",
title = "Understanding Latency Variation in Modern {DRAM}
Chips: Experimental Characterization, Analysis, and
Optimization",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "323--336",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901453",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Long DRAM latency is a critical performance bottleneck
in current systems. DRAM access latency is defined by
three fundamental operations that take place within the
DRAM cell array: (i) activation of a memory row, which
opens the row to perform accesses; (ii) precharge,
which prepares the cell array for the next memory
access; and (iii) restoration of the row, which
restores the values of cells in the row that were
destroyed due to activation. There is significant
latency variation for each of these operations across
the cells of a single DRAM chip due to irregularity in
the manufacturing process. As a result, some cells are
inherently faster to access, while others are
inherently slower. Unfortunately, existing systems do
not exploit this variation. The goal of this work is to
(i) experimentally characterize and understand the
latency variation across cells within a DRAM chip for
these three fundamental DRAM operations, and (ii)
develop new mechanisms that exploit our understanding
of the latency variation to reliably improve
performance. To this end, we comprehensively
characterize 240 DRAM chips from three major vendors,
and make several new observations about latency
variation within DRAM. We find that (i) there is large
latency variation across the cells for each of the
three operations; (ii) variation characteristics
exhibit significant spatial locality: slower cells are
clustered in certain regions of a DRAM chip; and (iii)
the three fundamental operations exhibit different
reliability characteristics when the latency of each
operation is reduced. Based on our observations, we
propose Flexible-LatencY DRAM (FLY-DRAM), a mechanism
that exploits latency variation across DRAM cells
within a DRAM chip to improve system performance. The
key idea of FLY-DRAM is to exploit the spatial locality
of slower cells within DRAM, and access the faster DRAM
regions with reduced latencies for the fundamental
operations. Our evaluations show that FLY-DRAM improves
the performance of a wide range of applications by
13.3\%, 17.6\%, and 19.5\%, on average, for each of the
three different vendors' real DRAM chips, in a
simulated 8-core system. We conclude that the
experimental characterization and analysis of latency
variation within modern DRAM, provided by this work,
can lead to new techniques that improve DRAM and system
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yaniv:2016:HDC,
author = "Idan Yaniv and Dan Tsafrir",
title = "Hash, Don't Cache (the Page Table)",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "337--350",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901456",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/hash.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Radix page tables as implemented in the x86-64
architecture incur a penalty of four memory references
for address translation upon each TLB miss. These 4
references become 24 in virtualized setups, accounting
for 5\%--90\% of the runtime and thus motivating chip
vendors to incorporate page walk caches (PWCs).
Counterintuitively, an ISCA 2010 paper found that radix
page tables with PWCs are superior to hashed page
tables, yielding up to 5x fewer DRAM accesses per page
walk. We challenge this finding and show that it is the
result of comparing against a suboptimal hashed
implementation---that of the Itanium architecture. We
show that, when carefully optimized, hashed page tables
in fact outperform existing PWC-aided x86-64 hardware,
shortening benchmark runtimes by 1\%--27\% and
6\%--32\% in bare-metal and virtualized setups, without
resorting to PWCs. We further show that hashed page
tables are inherently more scalable than radix designs
and are better suited to accommodate the ever
increasing memory size; their downside is that they
make it more challenging to support such features as
superpages.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
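
For orientation, the sketch below contrasts the per-TLB-miss memory-reference
count of a 4-level radix walk with that of a simple open-addressing hashed
page table; the data structures are toy simplifications and are not the
optimized hashed design evaluated in the paper.

LEVELS = 4        # x86-64 radix page tables have four levels

def radix_walk_refs():
    """A radix page-table walk reads one page-table entry per level."""
    return LEVELS

def hashed_lookup_refs(table, vpn):
    """Memory references for an open-addressing hashed page table lookup:
    one for the first slot plus one per extra probe (toy model)."""
    size = len(table)
    slot, refs = hash(vpn) % size, 1
    while table[slot] is not None and table[slot][0] != vpn:
        slot, refs = (slot + 1) % size, refs + 1
    return refs

if __name__ == "__main__":
    # build a toy hashed page table mapping a few virtual page numbers
    table = [None] * 64
    for vpn, pfn in [(0x1234, 7), (0x2345, 8), (0x3456, 9)]:
        slot = hash(vpn) % len(table)
        while table[slot] is not None:
            slot = (slot + 1) % len(table)
        table[slot] = (vpn, pfn)
    print("radix refs :", radix_walk_refs())              # always 4
    print("hashed refs:", hashed_lookup_refs(table, 0x1234))  # typically 1
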
@Article{Jog:2016:ECC,
author = "Adwait Jog and Onur Kayiran and Ashutosh Pattnaik and
Mahmut T. Kandemir and Onur Mutlu and Ravishankar Iyer
and Chita R. Das",
title = "Exploiting Core Criticality for Enhanced {GPU}
Performance",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "351--363",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901468",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern memory access schedulers employed in GPUs
typically optimize for memory throughput. They
implicitly assume that all requests from different
cores are equally important. However, we show that
during the execution of a subset of CUDA applications,
different cores can have different amounts of tolerance
to latency. In particular, cores with a larger fraction
of warps waiting for data to come back from DRAM are
less likely to tolerate the latency of an outstanding
memory request. Requests from such cores are more
critical than requests from others. Based on this
observation, this paper introduces a new memory
scheduler, called (C)ritica(L)ity (A)ware (M)emory
(S)cheduler (CLAMS), which takes into account the
latency-tolerance of the cores that generate memory
requests. The key idea is to use the fraction of
critical requests in the memory request buffer to
switch between scheduling policies optimized for
criticality and locality. If this fraction is below a
threshold, CLAMS prioritizes critical requests to
ensure cores that cannot tolerate latency are serviced
faster. Otherwise, CLAMS optimizes for locality,
anticipating that there are too many critical requests
and prioritizing one over another would not
significantly benefit performance. We first present a
core-criticality estimation mechanism for determining
critical cores and requests, and then discuss issues
related to finding a balance between criticality and
locality in the memory scheduler. We progressively
devise three variants of CLAMS, and show that the
Dynamic CLAMS provides significantly higher
performance, across a variety of workloads, than the
commonly-employed GPU memory schedulers optimized
solely for locality. The results indicate that a GPU
memory system that considers both core criticality and
DRAM access locality can provide significant
improvement in performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:2016:SSR,
author = "Lam M. Nguyen and Alexander L. Stolyar",
title = "A Service System with Randomly Behaving On-demand
Agents",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "365--366",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901484",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a service system where agents (or,
servers) are invited on-demand. Customers arrive as a
Poisson process and join a customer queue. Customer
service times are i.i.d. exponential. Agents' behavior
is random in two respects. First, they can be invited
into the system exogenously, and join the agent queue
after a random time. Second, with some probability they
rejoin the agent queue after a service completion, and
otherwise leave the system. The objective is to design
a real-time adaptive agent invitation scheme that keeps
both customer and agent queues/waiting-times small. We
study an adaptive scheme, which controls the number of
pending agent invitations, based on queue-state
feedback. We study the system process fluid limits, in
the asymptotic regime where the customer arrival rate
goes to infinity. We use the machinery of switched
linear systems and common quadratic Lyapunov functions
to derive sufficient conditions for the local stability
of fluid limits at the desired equilibrium point (with
zero queues). We conjecture that, for our model, local
stability is in fact sufficient for global stability of
fluid limits; the validity of this conjecture is
supported by numerical and simulation experiments. When
the local stability conditions do hold, simulations
show good overall performance of the scheme.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Novakovic:2016:ALI,
author = "Stanko Novakovic and Alexandros Daglis and Edouard
Bugnion and Babak Falsafi and Boris Grot",
title = "An Analysis of Load Imbalance in Scale-out Data
Serving",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "367--368",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901501",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite the natural parallelism across lookups,
performance of distributed key-value stores is often
limited due to load imbalance induced by heavy skew in
the popularity distribution of the dataset. To avoid
violating service level objectives expressed in terms
of tail latency, systems tend to keep server
utilization low and organize the data in micro-shards,
which in turn provides units of migration and
replication for the purpose of load balancing. These
techniques reduce the skew, but incur additional
monitoring, data replication and consistency
maintenance overheads. This work shows that the trend
towards extreme scale-out will further exacerbate the
skew-induced load imbalance, and hence the overhead of
migration and replication.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cao:2016:APC,
author = "Yi Cao and Javad Nejati and Pavan Maguluri and Aruna
Balasubramanian and Anshul Gandhi",
title = "Analyzing the Power Consumption of the Mobile Page
Load",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "369--370",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901491",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qureshi:2016:ATL,
author = "Mubashir Adnan Qureshi and Ajay Mahimkar and Lili Qiu
and Zihui Ge and Sarat Puthenpura and Nabeel Mir and
Sanjeev Ahuja",
title = "Automated Test Location Selection For Cellular Network
Upgrades",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "371--372",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901505",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cellular networks are constantly evolving due to
frequent changes in radio access and end user equipment
technologies, applications, and traffic. Network
upgrades should be performed with extreme caution since
millions of users heavily depend on the cellular
networks. Before upgrading the entire network, it is
important to conduct field evaluation of upgrades. The
choice and number of field test locations have a
significant impact on the time-to-market and confidence
in how well various network upgrades will work out in
the rest of the network. We propose a novel approach,
Reflection, to automatically determine where to
conduct the upgrade field tests so as to accurately
identify important features that affect the upgrade and
predict the performance of untested locations. We
demonstrate its effectiveness using real traces
collected from a major US cellular network as well as
synthetic traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2016:CCA,
author = "Wenjie Liu and Ping Huang and Kun Tang and Ke Zhou and
Xubin He",
title = "{CAR}: a Compression-Aware Refresh Approach to Improve
Memory Performance and Energy Efficiency",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "373--374",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901498",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "DRAM memory is suffering increasingly aggravating
refresh penalty, which no longer causes trivial
performance degradation and power consumption. As
memory capacity increases, refresh penalty has become
increasingly worse as more rows have to be refreshed.
In this work, we propose a simple, practical, and
effective refresh approach called CAR
(Compression-Aware Refresh) to efficiently mitigate
refresh overheads. We apply data compression technique
to store data in compressed format so that data blocks
which are originally distributed across all the
constituent chips of a rank only need to be stored in a
subset of those chips, leaving banks in the remaining
chips not fully occupied. As a result, the memory
controller can safely skip refreshing memory rows which
contain no useful data without compromising data
integrity. Such a compression-aware refresh scheme can
result in significant refresh savings and thus improve
overall memory performance and energy efficiency.
Moreover, to further take advantage of data
compression, we adopt the rank subsetting technique to
enable accesses to only those occupied chips for memory
requests accessing compressed data blocks. Evaluations
using benchmarks from SPEC CPU 2006 and the PARSEC 3.0
on the recent DDR4 memory systems have shown that CAR
can achieve up to 1.66x performance improvement (11.7\%
on average).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Poloczek:2016:CER,
author = "Felix Poloczek and Florin Ciucu",
title = "Contrasting Effects of Replication in Parallel
Systems: From Overload to Underload and Back",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "375--376",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901499",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Task replication has recently been advocated as a
practical solution to reduce latencies in parallel
systems. In addition to several convincing empirical
studies, analytical results have been provided, yet
under some strong assumptions such as independent
service times of the replicas, which may lend
themselves to some contrasting and perhaps contrived
behavior. For instance, under the independence
assumption, an overloaded system can be stabilized by a
replication factor, but can be sent back into overload
through further replication. Motivated by the need to
dispense with such common and restricting assumptions,
which may cause unexpected behavior, we develop a
unified and general theoretical framework to compute
tight bounds on the distribution of response times in
general replication systems. These results immediately
lend themselves to the optimal number of replicas
minimizing response time quantiles, depending on the
parameters of the system (e.g., the degree of
correlation amongst replicas).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{VanHoudt:2016:EBR,
author = "Benny {Van Houdt}",
title = "Explicit Back-off Rates for Achieving Target
Throughputs in {CSMA\slash CA} Networks",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "377--379",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901482",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "CSMA/CA networks have often been analyzed using a
stylized model that is fully characterized by a vector
of back-off rates and a conflict graph. We present an
explicit formula for the unique vector of back-off rates
needed to achieve any achievable throughput vector
provided that the network has a chordal conflict graph.
These back-off rates are such that the back-off rate of
a node only depends on its own target throughput and
the target throughput of its neighbors and can be
determined in a distributed manner. We also introduce a
distributed chordal approximation algorithm for general
conflict graphs which is shown (using numerical
examples) to be more accurate than the Bethe
approximation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2016:FDR,
author = "Liang Liu and Yating Wang and Lance Fortnow and Jin Li
and Jun Xu",
title = "Freestyle Dancing: Randomized Algorithms for Dynamic
Storage Load-Balancing",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "381--382",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901481",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work, we study a challenging research problem
that arises in minimizing the cost of storing customer
data online for reliable accesses in a cloud. The problem
is how to near-perfectly balance the remaining capacities of
all disks across the cloud system while adding new file
blocks so that the inevitable event of capacity
expansion can be postponed as much as possible. The
challenges of solving this problem are twofold. First,
new file blocks are added to the cloud concurrently by
many dispatchers (computing servers) that have no
communication or coordination among themselves. Though
each dispatcher is updated with information on disk
occupancies, the update is infrequent and not
synchronized. Second, for fault-tolerance purposes, a
combinatorial constraint has to be satisfied in
distributing the blocks of each new file across the
cloud system. We propose a randomized algorithm, in
which each dispatcher independently samples a
blocks-to-disks assignment according to a probability
distribution on a set of assignments conforming to the
aforementioned combinatorial requirement. We show that
this algorithm allows a cloud system to near-perfectly
balance the remaining disk capacities as rapidly as
theoretically possible, when starting from any
unbalanced state that is correctable mathematically.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ren:2016:JDP,
author = "Xiaoqi Ren and Palma London and Juba Ziani and Adam
Wierman",
title = "Joint Data Purchasing and Data Placement in a
Geo-Distributed Data Market",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "383--384",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901486",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies design challenges faced by a
geo-distributed cloud data market: which data to
purchase (data purchasing) and where to place/replicate
the data (data placement). We show that the joint
problem of data purchasing and data placement within a
cloud data market is NP-hard in general. However, we
give a provably optimal algorithm for the case of a
data market made up of a single data center, and then
generalize the structure from the single data center
setting and propose Datum, a near-optimal,
polynomial-time algorithm for a geo-distributed data
market.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukhopadhyay:2016:MRB,
author = "Arpan Mukhopadhyay and Ravi R. Mazumdar and Rahul
Roy",
title = "Majority Rule Based Opinion Dynamics with Biased and
Stubborn Agents",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "385--386",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901488",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we investigate the impact of
majority-rule based random interactions among agents in
a large social network on the diffusion of opinions in
the network. Opinion of each agent is assumed to be a
binary variable taking values in the set {0, 1}.
Interactions among agents are modeled using the
majority rule, where each agent updates its opinion at
random instants by adopting the ``majority'' opinion
among a group of randomly sampled agents. We
investigate two scenarios that respectively incorporate
`bias' of the agents towards a specific opinion and
stubbornness of some of the agents in the majority rule
dynamics. For the first scenario, where all the agents
are assumed to be ``biased'' towards one of the
opinions, it is shown that the agents reach a consensus
on the preferred opinion (with high probability) only
if the initial fraction of agents having the preferred
opinion is above a certain threshold. Furthermore, the
mean time taken to reach the consensus is shown to be
logarithmic in the network size. In the second
scenario, where the presence of ``stubborn'' agents,
who never update their opinions, is assumed, we
characterize the equilibrium distribution of opinions
of the non-stubborn agents using mean field techniques.
The mean field limit is shown to have multiple stable
equilibrium points which leads to a phenomenon known as
metastability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
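
A toy simulation of majority-rule opinion dynamics with a bias towards the
preferred opinion (ties in the sampled group are broken in its favour); the
group size, tie-breaking rule, and parameter values are illustrative
assumptions rather than the paper's exact model, but the run exhibits the
threshold behaviour the abstract describes.

import random

def majority_rule_dynamics(n=1000, init_frac_pref=0.3, group=4,
                           rounds=300, seed=0):
    """Each update, a random agent adopts the majority opinion among `group`
    randomly sampled agents; ties are broken in favour of the preferred
    opinion 1 (a simple way to model bias).  Returns the final fraction of
    agents holding opinion 1.  Parameters are illustrative."""
    rng = random.Random(seed)
    ones = int(n * init_frac_pref)
    opinions = [1] * ones + [0] * (n - ones)
    for _ in range(rounds * n):
        i = rng.randrange(n)
        sample_ones = sum(opinions[rng.randrange(n)] for _ in range(group))
        opinions[i] = 1 if sample_ones >= group - sample_ones else 0  # tie -> 1
    return sum(opinions) / n

if __name__ == "__main__":
    # below a threshold the preferred opinion dies out; above it, it takes over
    for frac in (0.15, 0.30, 0.50):
        print(frac, majority_rule_dynamics(init_frac_pref=frac))
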
@Article{Raja:2016:MFE,
author = "Vamseedhar Reddyvari Raja and Vinod Ramaswamy and
Srinivas Shakkottai and Vijay Subramanian",
title = "Mean Field Equilibria of Pricing Games in {Internet}
Marketplaces",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "387--388",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901495",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We model an Internet marketplace using a set of
servers that choose prices for performing jobs. Each
server has a queue of unfinished jobs, and is penalized
for delay by the market maker via a holding cost. A
server completes jobs with a low or high ``quality'',
and jobs truthfully report the quality with which they
were completed. The best estimate of quality based on
these reports is the ``reputation'' of the server. A
server bases its pricing decision on the distribution
of its competitors' offered prices and reputations. An
entering job is given a random sample of servers, and
chooses the best one based on a linear combination of
price and reputation. We seek to understand how prices
would be determined in such a marketplace using the
theory of Mean Field Games. We show the existence of a
Mean Field Equilibrium and show how reputation plays a
role in allowing servers to declare larger prices than
their competitors. We illustrate our results by a
numerical study of the system via simulation with
parameters chosen from data gathered from existing
Internet marketplaces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shafaei:2016:MSD,
author = "Mansour Shafaei and Mohammad Hossein Hajkazemi and
Peter Desnoyers and Abutalib Aghayev",
title = "Modeling {SMR} Drive Performance",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "389--390",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901496",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Combes:2016:MSF,
author = "Richard Combes and Habib Sidi and Salah Elayoubi",
title = "Multipath Streaming: Fundamental Limits and Efficient
Algorithms",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "391--392",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901485",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate streaming over multiple links. We
provide lower bounds on the starvation probability of
any policy and simple, order-optimal policies with
matching and tractable upper bounds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shekaramiz:2016:NCA,
author = "Alireza Shekaramiz and Jorg Liebeherr and Almut
Burchard",
title = "Network Calculus Analysis of a Feedback System with
Random Service",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "393--394",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901487",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Feedback mechanisms are integral components of network
protocols and traffic control algorithms. Their
performance evaluation is hard due to intricate time
correlations introduced by feedback. Network calculus
has been successfully applied for the analysis of
feedback mechanisms in deterministic systems. However,
an extension to random systems has remained an open
problem for more than a decade. We present a stochastic
network calculus analysis of a random system with
feedback, specifically, a window flow control system
with random service and fixed feedback delay. We
quantify the service impediment due to the feedback
mechanism by deriving statistical lower bounds on the
available service, and obtain complementary upper
bounds. We also discover special cases where an exact
description of the service is feasible.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ahmed:2016:QAL,
author = "Adnan Ahmed and Zubair Shafiq and Amir Khakpour",
title = "{QoE} Analysis of a Large-Scale Live Video Streaming
Event",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "395--396",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901504",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Streaming video has received a lot of attention from
industry and academia. In this work, we study the
characteristics and challenges associated with
large-scale live video delivery. Using logs from a
commercial Content Delivery Network (CDN), we study
live video delivery for a major entertainment event
that was streamed by hundreds of thousands of viewers
in North America. We analyze Quality-of-Experience
(QoE) for the event and note that a significant number
of users suffer QoE impairments. As a consequence of
QoE impairments, these users exhibit lower engagement
metrics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2016:SRL,
author = "Sen Yang and Bill Lin and Jun Xu",
title = "Safe Randomized Load-Balanced Switching by Diffusing
Extra Loads",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "397--398",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901480",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Load-balanced switch architectures are known to be
scalable in both size and speed, which is of interest
due to the continued exponential growth in Internet
traffic. However, the main drawback of load-balanced
switches is that packets can depart out of order from
the switch. Randomized load-balancing of application
flows by means of hashing on the packet header is a
well-known simple solution to this packet reordering
problem in which all packets belonging to the same
application flow are routed through the same
intermediate port and hence the same path through the
switch. Unfortunately, this method of load-balancing
can lead to instability, depending on the mix of flow
sizes and durations in the group of flows that gets
randomly assigned to route through the same
intermediate port. In this paper, we show that the
randomized load-balancing of application flows can be
enhanced to provably guarantee both stability and
packet ordering by extending the approach with safety
mechanisms that can uniformly diffuse packets across
the switch whenever there is a build-up of packets
waiting to route through the same intermediate port.
Although simple and intuitive, our experimental results
show that our extended randomized load-balancing
approach significantly outperforms existing
load-balanced switch architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
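
A toy sketch of the two ingredients the abstract above combines: hashing a
flow's 5-tuple to a fixed intermediate port (which keeps a flow's packets in
order), plus a simple diffusion fallback once that port's queue exceeds a
threshold. The threshold rule and the round-robin diffusion used here are
illustrative stand-ins for the paper's safety mechanism.

import hashlib
from collections import defaultdict

NUM_PORTS = 8
DIFFUSE_THRESHOLD = 4      # illustrative queue build-up threshold

queues = defaultdict(int)  # intermediate port -> packets currently queued
rr_counter = 0             # round-robin pointer used when diffusing

def flow_hash(five_tuple):
    """Stable hash of the flow's 5-tuple onto an intermediate port."""
    digest = hashlib.sha256(repr(five_tuple).encode()).digest()
    return digest[0] % NUM_PORTS

def route_packet(five_tuple):
    """Route via the flow's hashed port, unless that port's queue exceeds
    the threshold, in which case diffuse the packet round-robin (toy rule)."""
    global rr_counter
    port = flow_hash(five_tuple)
    if queues[port] >= DIFFUSE_THRESHOLD:
        port = rr_counter % NUM_PORTS      # uniform diffusion of the extra load
        rr_counter += 1
    queues[port] += 1
    return port

if __name__ == "__main__":
    flow = ("10.0.0.1", "10.0.0.2", 1234, 80, "tcp")
    print([route_packet(flow) for _ in range(10)])  # sticks to one port, then diffuses
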
@Article{Ray:2016:SSC,
author = "Avik Ray and Sujay Sanghavi and Sanjay Shakkottai",
title = "Searching For A Single Community in a Graph",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "399--400",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901494",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In standard graph clustering/community detection, one
is interested in partitioning the graph into more
densely connected subsets of nodes. In contrast, the
search problem of this paper aims to only find the
nodes in a single such community, the target, out of
the many communities that may exist. To do so, we are
given suitable side information about the target; for
example, a very small number of nodes from the target
are labeled as such. We consider a general yet simple
notion of side information: all nodes are assumed to
have random weights, with nodes in the target having
higher weights on average. Given these weights and the
graph, we develop a variant of the method of moments
that identifies nodes in the target more reliably, and
with lower computation, than generic community
detection methods that do not use side information and
partition the entire graph.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2016:SMY,
author = "Daiping Liu and Xing Gao and Mingwei Zhang and Haining
Wang",
title = "Shoot for the {Moon} and You Will Never Miss:
Characterizing and Detecting Aimbots in Online Games",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "401--402",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901503",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Giovanidis:2016:SML,
author = "Anastasios Giovanidis and Apostolos Avranas",
title = "Spatial Multi-{LRU} Caching for Wireless Networks with
Coverage Overlaps",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "403--405",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901483",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This article introduces a novel family of
decentralised caching policies for wireless networks,
referred to as spatial multi-LRU. Based on these, cache
inventories are updated in a way that provides content
diversity to users that are covered by, and thus have
access to, more than one station. Two variations are
proposed, the multi-LRU-One and -All, which differ in
the number of replicas inserted in the involved edge
caches. Che-like approximations are proposed to
accurately predict their hit probability under the
Independent Reference Model (IRM). For IRM traffic,
multi-LRU-One outperforms multi-LRU-All, whereas when
the traffic exhibits temporal locality the -All
variation can perform better.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Narayanan:2016:SFD,
author = "Iyswarya Narayanan and Di Wang and Myeongjae Jeon and
Bikash Sharma and Laura Caulfield and Anand
Sivasubramaniam and Ben Cutler and Jie Liu and
Badriddine Khessib and Kushagra Vaid",
title = "{SSD} Failures in Datacenters: What, When and Why?",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "407--408",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901489",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite the growing popularity of Solid State Disks
(SSDs) in the datacenter, little is known about their
reliability characteristics in the field. The little
knowledge is mainly vendor supplied, which cannot
really help understand how SSD failures can manifest
and impact production systems, in order to take
appropriate actions. Besides failure data, a detailed
characterization requires a wide spectrum of data about
factors influencing SSD failures, right from
provisioning (what models, where and when deployed,
etc.) to the operational ones (workloads, read-write
intensities, write amplification, etc.). We analyze
over half a million SSDs that span multiple generations
spread across several datacenters which host a wide
range of workloads over nearly 3 years. By studying the
diverse set of factors on SSD failures, and their
symptoms, our work provides the first look at the what,
when and why characteristics of SSD failures in
production datacenters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gardner:2016:PCR,
author = "Kristen Gardner and Samuel Zbarsky and Mor
Harchol-Balter and Alan Scheller-Wolf",
title = "The Power of $d$ Choices for Redundancy",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "409--410",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901497",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An increasingly prevalent technique for improving
response time in queueing systems is the use of
redundancy. In a system with redundant requests, each
job that arrives to the system is copied and dispatched
to multiple servers. As soon as the first copy
completes service, the job is considered complete, and
all remaining copies are deleted. A great deal of
empirical work has demonstrated that redundancy can
significantly reduce response time in systems ranging
from Google's BigTable service to kidney transplant
waitlists. We propose a theoretical model of
redundancy, the Redundancy-d system, in which each job
sends redundant copies to d servers chosen uniformly at
random. We derive the first exact expressions for mean
response time in Redundancy-d systems with any finite
number of servers. We also find asymptotically exact
expressions for the distribution of response time as
the number of servers approaches infinity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
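A minimal, illustrative event-driven simulation of the Redundancy-d dynamics summarized in the Gardner et al. abstract above (Poisson arrivals, i.i.d. exponential services, FCFS queues, first copy to finish wins, siblings cancelled). This is a sketch for intuition only, not code from the cited paper; all parameter names and default values are assumptions.

    # Illustrative sketch (not from the cited paper) of the Redundancy-d model.
    import heapq
    import itertools
    import random
    from collections import deque

    def simulate_redundancy_d(num_servers=10, d=2, lam=8.0, mu=1.0,
                              num_jobs=100000, seed=1):
        rng = random.Random(seed)
        queues = [deque() for _ in range(num_servers)]  # FCFS copies per server
        events, tick = [], itertools.count()            # (time, seq, kind, data)
        arrival_time, finished, resp = {}, set(), []

        def start_service(t, s):
            # Schedule a completion for the head-of-line copy at server s.
            if queues[s]:
                heapq.heappush(events, (t + rng.expovariate(mu), next(tick),
                                        "done", (s, queues[s][0])))

        heapq.heappush(events, (rng.expovariate(lam), next(tick), "arrival", 0))
        while len(resp) < num_jobs:
            t, _, kind, data = heapq.heappop(events)
            if kind == "arrival":
                job = data
                arrival_time[job] = t
                for s in rng.sample(range(num_servers), d):  # d distinct servers
                    queues[s].append(job)
                    if len(queues[s]) == 1:
                        start_service(t, s)
                heapq.heappush(events, (t + rng.expovariate(lam), next(tick),
                                        "arrival", job + 1))
            else:
                s, job = data
                if not queues[s] or queues[s][0] != job:
                    continue                    # stale event for a cancelled copy
                queues[s].popleft()
                if job not in finished:
                    finished.add(job)
                    resp.append(t - arrival_time[job])
                    for s2 in range(num_servers):   # cancel the sibling copies
                        if s2 != s and job in queues[s2]:
                            was_head = queues[s2][0] == job
                            queues[s2].remove(job)
                            if was_head:
                                start_service(t, s2)
                start_service(t, s)
        return sum(resp) / len(resp)

    if __name__ == "__main__":
        print("mean response time:", simulate_redundancy_d())
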
@Article{Wang:2016:TBB,
author = "Hui Wang and Peter Varman",
title = "Time-Based Bandwidth Allocation for Heterogeneous
Storage",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "411--413",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901492",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Providing fairness and system efficiency are
important, often conflicting, requirements when
allocating shared resources. In a hybrid storage system
the problem is complicated by the high variability in
request service times, caused by speed differences
between heterogeneous devices and workload-specific
variations in access time within a device. This paper
describes a model for fair time-based resource
allocation in a hybrid storage system that has better
fairness and efficiency than traditional IOPS-based
schemes. An analytical model is developed and an
optimal algorithm for fairly allocating device times to
applications while maximizing the system throughput or
utilization is presented. The results are validated
using Linux implementations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2016:TMR,
author = "Wei Wang and Baochun Li and Ben Liang and Jun Li",
title = "Towards Multi-Resource Fair Allocation with Placement
Constraints",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "415--416",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901493",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multi-resource fair schedulers have been widely
implemented in compute clusters to provide service
isolation guarantees. Existing multi-resource sharing
policies, notably Dominant Resource Fairness (DRF) and
its variants, are designed for unconstrained jobs that
can run on all machines in a cluster. However, an
increasing number of datacenter jobs specify placement
constraints and can only run on a particular class of
machines meeting specific hardware/software
requirements (e.g., GPUs or a particular kernel
version). We show that directly extending existing
policies to constrained jobs either compromises
isolation guarantees or allows users to gain more
resources by deceiving the scheduler. It remains
unclear how multi-resource fair sharing is defined and
achieved in the presence of placement constraints. We
address this open problem by a new sharing policy,
called Task Share Fairness (TSF), that provides
provable isolation guarantees and is strategy-proof
against gaming the allocation policy. TSF is shown to
be envy-free and Pareto optimal as well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xie:2016:TDR,
author = "Hong Xie and Richard T. B. Ma and John C. S. Lui",
title = "Trading Discount for Reputation?: On the Design and
Analysis of {E}-Commerce Discount Mechanisms",
journal = j-SIGMETRICS,
volume = "44",
number = "1",
pages = "417--418",
month = jun,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/2964791.2901500",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Jun 30 16:31:56 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop an optimization framework to trade
short-term profits for reputation (i.e., reducing
ramp-up time). We apply the stochastic bandits
framework to design an online discounting mechanism
which infers the optimal discount from a seller's
historical transaction data. We conduct experiments on
an eBay dataset and show that our online discounting
mechanism can trade 60\% of the short-term profits for
reducing the ramp-up time by 40\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2016:ETI,
author = "Mark S. Squillante",
title = "Estimation of the traffic intensity in a
piecewise-stationary {Mt/Gt/1} queue with probing",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "3--5",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003979",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We use a probing strategy to estimate the time
dependent traffic intensity in an Mt/Gt/1 queue, where
the arrival rate and the general service-time
distribution change from one time interval to another,
and derive statistical properties of the proposed
estimator. We present a method to detect a switch from
a stationary interval to another using a sequence of
probes to improve the estimation. At the end, we
compare our results with two estimators proposed in the
literature for the M/G/1 queue.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2016:CLF,
author = "Nicolas Gast",
title = "Construction of {Lyapunov} Functions via Relative
Entropy with Application to Caching",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "6--8",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003980",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a system of interacting objects that is a
generalization of the model of the cache-replacement
policy RAND(m) introduced in [6]. We provide a
mean-field approximation of this system. We show how to
use relative entropy to construct a Lyapunov function
for this model. This guarantees that the mean-field
model converges to its unique fixed point.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Domingues:2016:SPT,
author = "Guilherme Domingues and Edmundo {de Souza e Silva} and
Rosa Le{\~a}o and Daniel Menasch{\'e} and Don Towsley",
title = "Search and Placement in Tiered Cache Networks",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "9--11",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003981",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cai:2016:GIS,
author = "Kechao Cai and Hong Xie and John C. S. Lui",
title = "Generalization of Information Spreading Forensics via
Sequential Dependent Snapshots",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "12--14",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003982",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Learning the characteristics of information spreading
in networks is crucial in communication studies, social
network sentiment analysis and epidemic investigation.
Previous work on information spreading has been focused
on the information source detection using either a
single observation, or multiple but ``independent''
observations of the underlying network while assuming
information spreads at a ``uniform spreading rate''. In
this paper, we conduct the first theoretical and
experimental study on information spreading forensics,
and propose a framework for estimating information
spreading rates, information source start time and
location of information source by utilizing ``multiple
sequential and dependent snapshots'' where information
can spread at different rates. We prove that our
estimation framework generalizes the rumor centrality
[1], and we allow heterogeneous information spreading
rates on different branches in d-regular tree
networks. We further show that our framework can
provide highly accurate estimates for the information
spreading rates on different branches, the source start
time, and a more accurate estimate of the location of
information source than rumor centrality and Jordan
center in both synthetic networks and real-world
networks (i.e., Twitter).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:2016:ASS,
author = "Erol Gelenbe",
title = "Agreement in Spins and Social Networks",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "15--17",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003983",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a model of mutually influencing
individuals who hold one of two opinions about some
matter. This is similar to a system of particles that
can have one of two spin states. The model considers N
sub-systems where individuals stay for some time, and
then move from one subnetwork to another independently
of each other, or they may leave the overall network
from any one of the sub-systems. They arrive externally
to any sub-system according to independent Poisson
processes. In each sub-system individuals can influence
each other to align with their own opinion or spin, and
their opinion can also fluctuate at random in either
direction. The system also allows for a bias or
directional field in any of the sub-systems that
influences the individuals or spins that are locally
present. We show that even with a weak bias, when
random fluctuations become small then all the
individuals or spins in a given sub-network will align
with probability one, to the opinion or spin direction
represented by the bias or spin.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Doncel:2016:MFG,
author = "Josu Doncel and Nicolas Gast and Bruno Gaujal",
title = "Are Mean-field Games the Limits of Finite Stochastic
Games?",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "18--20",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003984",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feinberg:2016:SOS,
author = "Eugene A. Feinberg and Yan Liang",
title = "Structure of Optimal Solutions to Periodic-Review
Total-Cost Stochastic Inventory Control Problems",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "21--23",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003985",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the structure of optimal policies
for discounted periodic-review single-commodity
total-cost inventory control problems with fixed
ordering costs for finite and infinite horizons. There
are known conditions in the literature for optimality
of (st, St) policies for finite-horizon problems and
the optimality of (s, S) policies for infinite-horizon
problems. The results of this paper cover the
situation when such assumptions may not hold. This
paper describes a parameter, which, together with the
value of the discount factor and the horizon length,
defines the structure of an optimal policy. For the
infinite horizon, depending on the values of this
parameter and the discount factor, an optimal policy
either is an (s, S) policy or never orders inventory.
For a finite horizon, depending on the values of this
parameter, the discount factor, and the horizon length,
there are three possible structures of an optimal
policy: (i) it is an (st, St) policy, (ii) it is an
(st, St) policy at earlier stages and then does not
order inventory, or (iii) it never orders inventory.
The paper also establishes continuity of the optimal
value function and describes the optimal actions at
states st and s.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fricker:2016:ADR,
author = "Christine Fricker and Fabrice Guillemin and Philippe
Robert",
title = "Analysis of Downgrading for Resource Allocation",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "24--26",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003986",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2016:RBD,
author = "Yingdong Lu and Siva Theja Maguluri and Mark S.
Squillante and Chai Wah Wu",
title = "Risk-Based Dynamic Allocation of Computing Resources",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "27--29",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003987",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sermpezis:2016:IDS,
author = "Pavlos Sermpezis and Xenofontas Dimitropoulos",
title = "Inter-domain {SDN}: Analysing the Effects of Routing
Centralization on {BGP} Convergence Time",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "30--32",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003988",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gardner:2016:URT,
author = "Kristen Gardner and Samuel Zbarsky and Mark
Velednitsky and Mor Harchol-Balter and Alan
Scheller-Wolf",
title = "Understanding Response Time in the Redundancy-$d$
System",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "33--35",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003989",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An increasingly prevalent technique for improving
response time in queueing systems is the use of
redundancy. In a system with redundant requests, each
job that arrives to the system is copied and dispatched
to multiple servers. As soon as the first copy
completes service, the job is considered complete, and
all remaining copies are deleted. A great deal of
empirical work has demonstrated that redundancy can
significantly reduce response time in systems ranging
from Google's BigTable service to kidney transplant
waitlists. We propose a theoretical model of
redundancy, the Redundancy-$d$ system, in which each
job sends redundant copies to d servers chosen
uniformly at random. We derive the first exact
expressions for mean response time in Redundancy-$d$
systems with any finite number of servers. We also find
asymptotically exact expressions for the distribution
of response time as the number of servers approaches
infinity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:2016:UPD,
author = "Debankur Mukherjee and Sem Borst and Johan van
Leeuwaarden and Phil Whiting",
title = "Universality of Power-of-$d$ Load Balancing Schemes",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "36--38",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003990",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a system of N parallel queues with unit
exponential service rates and a single dispatcher where
tasks arrive as a Poisson process of rate $ \lambda (N)
$. When a task arrives, the dispatcher assigns it to a
server with the shortest queue among $ d (N) \leq N $
randomly selected servers. This load balancing policy
is referred to as a power-of-$ d(N) $ or JSQ($ d(N)$)
scheme, and subsumes the Join-the-Shortest Queue (JSQ)
policy as a crucial special case for $ d(N) = N$. We
construct a coupling to bound the difference in the
queue length processes between the JSQ policy and an
arbitrary value of $ d(N)$. We use the coupling to
derive the fluid limit in the regime where $ \lambda
(N) / N \to \lambda < 1$ and $ d(N) \to \infty $ as $ N
\to \infty $, along with the corresponding fixed point.
The fluid limit turns out not to depend on the exact
growth rate of $ d(N)$, and in particular coincides
with that for the JSQ policy. We further leverage the
coupling to establish that the diffusion limit in the
regime where $ (N - \lambda (N)) / \sqrt N \to \beta >
0$ and $ d(N) / \sqrt N \log N \to \infty $ as $ N \to
\infty $ corresponds to that for the JSQ policy. These
results indicate that the stochastic optimality of the
JSQ policy can be preserved at the fluid-level and
diffusion-level while reducing the overhead by nearly a
factor $ O(N)$ and $ O(\sqrt N)$, respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
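As a side note to the Mukherjee et al. entry above, the dispatch rule itself is simple. The following minimal Python sketch (illustrative only, not from the cited paper) shows the JSQ(d) / power-of-d assignment step; choosing d equal to the number of servers recovers plain Join-the-Shortest-Queue.

    # Illustrative sketch of the power-of-d (JSQ(d)) dispatch rule:
    # sample d of the N servers uniformly at random and send the arriving
    # task to the sampled server with the shortest queue.
    import random

    def jsq_d(queue_lengths, d, rng=random):
        candidates = rng.sample(range(len(queue_lengths)), d)
        return min(candidates, key=lambda s: queue_lengths[s])

    # Example: with d = len(queue_lengths) this is plain JSQ.
    queues = [3, 0, 5, 2, 4]
    print(jsq_d(queues, d=2, rng=random.Random(0)))
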
@Article{Juneja:2016:LQ,
author = "S. Juneja and D. Manjunath",
title = "To Lounge or to Queue Up",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "39--41",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003991",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stolyar:2016:LSS,
author = "Alexander L. Stolyar",
title = "Large-scale Service Systems with Packing Constraints
and Heterogeneous Servers",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "42--44",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003992",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A service system with multiple types of arriving
customers and multiple types of servers is considered.
Several customers (possibly of different types) can be
placed for concurrent service into same server, subject
to ``packing'' constraints, which depend on the server
type. Service times of different customers are
independent, even if served simultaneously by the same
server. The large-scale asymptotic regime is considered
such that the customer arrival rates grow to infinity.
We consider two variants of the model. For the
infinite-server model, we prove asymptotic optimality
of the Greedy Random (GRAND) algorithm in the sense of
minimizing the weighted (by type) number of occupied
servers in steady state. (This version of GRAND
generalizes that introduced in [1] for the homogeneous
systems, with all servers of the same type.) We then
introduce a natural extension of GRAND algorithm for
finite-server systems with blocking. Assuming
subcritical system load, we prove existence,
uniqueness, and local stability of the large-scale
system equilibrium point such that no blocking occurs.
This result strongly suggests a conjecture that the
steady-state blocking probability under the algorithm
vanishes in the large-scale limit.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2016:DOS,
author = "Y. Lu and S. T. Maguluri and M. S. Squillante and T.
Suk",
title = "Delay-Optimal Scheduling for Some Input-Queued
Switches",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "45--47",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003993",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The seventh annual GreenMetrics Workshop was held on
June 14, 2016 in Antibes Juan-les-Pins, France, in
conjunction with the ACM SIGMETRICS/IFIP Performance
2016 conference. For the past five years the workshop
has been expanded from topics on the energy and
ecological impact of Information and Communication
Technology (ICT) systems, to include emerging work on
the Smart Grid. Topics of interest fall broadly into
three main areas: designing sustainable ICT, ICT for
sustainability, and building a smarter, more
sustainable electricity grid. The workshop brought
together researchers from the traditional SIGMETRICS
and Performance communities with researchers and
practitioners in the three areas above, to exchange
technical ideas and experiences on issues related to
sustainability and ICT. The workshop program included
three 45-min keynote talks, and nine 20-min
presentations of technical papers. All papers are
included in this special issue and we briefly summarize
the keynote talks here. In the first keynote ``The New
Sharing Economy for the Grid2050'', Kameshwar Poolla
from UC Berkeley discussed three sharing economy
opportunities in the electricity sector- sharing
storage, sharing PV generation, and sharing recruited
demand flexibility. He also discussed regulatory and
technical challenges to these opportunities. In
addition, he presented a micro-economic analysis of
decisions by firms, and quantified the benefits of
sharing to various participants. Xue (Steve) Liu from
McGill University presented the second keynote talk,
titled ``When Bits Meet Joules: A View from Data Center
Operations' Perspective''. He used data centers as an
example to illustrate the importance of the codesign of
information technologies and new energy technologies.
Specifically, he focused on how to design cost-saving
power management strategies for Internet data center
operations. Our third keynote talk was by Florian
D{\"o}rfler from ETH Z{\"u}rich, titled ``Virtual
Inertia Emulation and Placement in Power Grids''. He
presented a comprehensive analysis to address the
optimal inertia placement problem, in particular, by
providing a set of closed-form global optimality
results for particular problem instances as well as a
computational approach resulting in locally optimal
solutions. He illustrated the results with a
three-region power grid case study. The best student
paper award was given to ``Opportunities for Price
Manipulation by Aggregators in Electricity Markets'' by
Ruhi et al. The award was determined by a committee of
the invited speakers, chaired by Catherine Rosenberg,
after considering both the papers and the presentations
of the candidates. The authors quantified the profit an
aggregator can obtain through strategic curtailment of
generation in an electricity market. Efficient
algorithms were shown to exist when the topology of the
network is radial (acyclic). Further, significant
increases in profit can be obtained through strategic
curtailment in practical settings. Demand response is
discussed in the following two papers. In ``Optimizing
the Level of Commitment in Demand Response'', Comden et
al. proposed a generalized demand response framework
called Flexible Commitment Demand Response (FCDR) to
allow for explicit choices of the level of commitment.
Numerical simulations were conducted to demonstrate
that FCDR brings in significant (around 50\%) social
cost reductions and benefits both the LSE and customers
simultaneously. In ``An Emergency Demand Response
Mechanism for Cloud Computing'', Zhou et al. proposed
an online auction for dynamic cloud resource
provisioning under the emergency demand response
program, which runs in polynomial time, achieves
truthfulness and close-to-optimal social welfare for
the cloud ecosystem. Geographical load balancing was
examined by Neglia et al. in ``Geographical Load
Balancing Across Green Datacenters: a Mean Field
Analysis''. They modeled via a Markov Chain the problem
of scheduling jobs by prioritizing datacenters where
renewable energy is currently available. Mean field
techniques were employed to derive an asymptotic
approximate model and to investigate relationships and
tradeoffs among the various system parameters. In
``Emergence of Shared Behaviour in Distributed
Scheduling Systems for Domestic Appliances'', Facchini
et al. showed social interaction can increase the
flexibility of users and lower the peak power,
resulting in a smoother usage of energy throughout
the day. Rossi et al. examined public lighting in
``AURORA: an Energy Efficient Public Lighting IoT
System for Smart Cities'' by proposing Aurora: a
low-budget, easy-to-deploy IoT control system. Aurora
was deployed in a mid-size Italian municipality and its
performance over 4 months was evaluated to quantify
both the power and the economic saving. Wireless and
wired network power consumption was studied in the
following three papers. In ``Radio Resource Management
for Improving Energy Self-Sufficiency of Green Mobile
Networks'', Dalmasso et al. designed Resource on Demand
strategies to reduce the base station cluster energy
consumption and to adapt it to energy availability. Fan
et al. also examined base stations in ``Boosting Service
Availability for Base Stations of Cellular Networks by
Event-Driven Battery Profiling'' by conducting a
systematical analysis on a real world dataset and
proposing an event-driven battery profiling approach to
precisely extract the features that cause the working
condition degradation of the battery group. Last but
not least, in ``Toward Power-Efficient Backbone
Routers'', Lu et al. studied how InTerFaces can
distribute traffic flows to the Processing Engines
(PEs) so that the offered loads on all active PEs are
near-perfectly balanced over time, and kept close to a
target load, so that the number of active PEs can be
minimized. The papers presented at the workshop
reflected a current concern of energy consumption
associated with proliferating data centers, and other
fundamental issues in green computing. The workshop
incited interesting discussions and exchange among
participants from North America, Europe, and Asia.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ruhi:2016:OPM,
author = "Navid Azizan Ruhi and Niangjun Chen and Krishnamurthy
Dvijotham and Adam Wierman",
title = "Opportunities for Price Manipulation by Aggregators in
Electricity Markets",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "49--51",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003995",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Aggregators are playing an increasingly crucial role
for integrating renewable generation into power
systems. However, the intermittent nature of renewable
generation makes market interactions of aggregators
difficult to monitor and regulate, raising concerns about
potential market manipulations. In this paper, we
address this issue by quantifying the profit an
aggregator can obtain through strategic curtailment of
generation in an electricity market. We show that,
while the problem of maximizing the benefit from
curtailment is hard in general, efficient algorithms
exist when the topology of the network is radial
(acyclic). Further, we highlight that significant
increases in profit can be obtained through strategic
curtailment in practical settings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Comden:2016:OLC,
author = "Joshua Comden and Zhenhua Liu and Yue Zhao",
title = "Optimizing the Level of Commitment in Demand
Response",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "52--67",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003996",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Demand response (DR) is a cost-effective and
environmentally friendly approach for mitigating the
uncertainties in renewable energy integration by taking
advantage of the flexibility of the customers' demand.
Existing DR programs, however, suffer from the
inflexibility of commitment levels. In particular,
these programs can be split into two classes depending
on whether customers are fully committed or fully
voluntary to provide demand response. Full commitment
makes customers reluctant to participate, while the
load serving entity (LSE) cannot rely on voluntary
participation for reliability and dispatchability
considerations. This paper proposes a generalized DR
framework called Flexible Commitment Demand Response
(FCDR) to allow for explicit choices of the level of
commitment. We perform numerical simulations to
demonstrate that the optimal level of commitment in
FCDR brings in significant (around 50\%) social cost
reductions, consistently under various settings. This
benefits both the LSE and customers simultaneously.
Further, lower cost and higher levels of commitment can
be simultaneously achieved with the optimal level of DR
commitment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2016:EDR,
author = "Ruiting Zhou and Zongpeng Li and Chuan Wu",
title = "An Emergency Demand Response Mechanism for Cloud
Computing",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "58--63",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003997",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study emergency demand response (EDR) mechanisms
from data centers' perspective, where a cloud data
center participates in a mandatory EDR program while
receiving online computing job bids. We target a
realistic EDR mechanism where: (i) The cloud provider
dynamically packs different types of resources on
servers into requested VMs and computes job schedules
to meet users' requirements; (ii) The power consumption
of servers in the cloud is limited by the grid through
the EDR program; (iii) The operating cost of the cloud
is considered in the calculation of social welfare,
measured by electricity cost. We propose an online
auction for dynamic cloud resource provisioning under
the EDR program, which runs in polynomial time,
achieves truthfulness and close-to-optimal social
welfare for the cloud ecosystem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Neglia:2016:GLB,
author = "Giovanni Neglia and Matteo Sereno and Giuseppe
Bianchi",
title = "Geographical Load Balancing across Green Datacenters:
a Mean Field Analysis",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "64--69",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003998",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "``Geographic Load Balancing'' is a strategy for
reducing the energy cost of data centers spread
across different terrestrial locations. In this paper,
we focus on load balancing among micro-datacenters
powered by renewable energy sources. We model via a
Markov Chain the problem of scheduling jobs by
prioritizing datacenters where renewable energy is
currently available. Not finding a convenient closed
form solution for the resulting chain, we use mean
field techniques to derive an asymptotic approximate
model which instead is shown to have an extremely
simple and intuitive steady state solution. After
proving, using both theoretical and discrete event
simulation results, that the system performance
converges to the asymptotic model for an increasing
number of datacenters, we exploit the simple closed
form model's solution to investigate relationships and
trade-offs among the various system parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Facchini:2016:ESB,
author = "Alessandro Facchini and Cristina Rottondi and Giacomo
Verticale",
title = "Emergence of Shared Behaviour in Distributed
Scheduling Systems for Domestic Appliances",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "70--75",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3003999",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When energy prices change during the day, users will
schedule their appliances with the aim of minimizing
their bill. If the variable price component depends on
the peak demand during each given hour, users will
distribute their consumption more evenly during the
day, resulting in lower peak consumption. The process
can be automated by means of an Energy Management
System that chooses the best schedule that satisfies
the user's delay tolerance threshold. In turn, delay
tolerance thresholds may slowly vary over time. In
fact, users may be willing to change their threshold to
match the threshold of their social group, especially
if there is evidence that friends with a more flexible
approach have paid a lower bill. We show that social
interaction can increase the flexibility of users and
lower the peak power, resulting in a smoother usage
of energy throughout the day.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rossi:2016:AEE,
author = "Claudio Rossi and Manuel Gaetani and Antonio Defina",
title = "{AURORA}: an Energy Efficient Public Lighting {IoT}
System for Smart Cities",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "76--81",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3004000",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Public lighting accounts for a considerable amount of
the total electricity consumption. Solutions to reduce
its energy (and cost) footprint do exist, and they
often translate into costly lamp replacement projects,
which cannot be afforded by the majority of
municipalities. To solve this situation, we propose
Aurora: a low-budget, easy-to-deploy IoT control system
that exploits the ubiquity of cellular networks (2--4G)
and scalable Cloud computing architectures to allow
Smart Cities to save on the public lighting electrical
bill. We deploy Aurora in a mid-size Italian
municipality and we evaluate its performance over 4
months to quantify both the power and the economic
saving achieved by our solution. We estimate the impact
of a city-level Aurora installation and further extend
the benefit analysis to most populated EU countries.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dalmasso:2016:RRM,
author = "Mattia Dalmasso and Michela Meo and Daniela Renga",
title = "Radio Resource Management for Improving Energy
Self-sufficiency of Green Mobile Networks",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "82--87",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3004001",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Three factors make power supply one of the most urgent
and challenging issues for the future of mobile
networks. First, the expected fast growth of mobile
traffic raises doubts about the sustainability of
mobile communications, which already account for 0.5\%
of the worldwide energy consumption. Second, power
supply has become by far the largest component of the
operational costs of running a network. Third, the
deployment of network infrastructures in emerging
countries is strategic, but, in these countries, the
power grid is not always reliable. Renewable energy
sources can help to cope with these issues. However,
one of their main drawbacks is the intermittent and
difficult-to-predict energy generation profile. The
feasibility of renewable power supply for base station
(BS) powering then depends on the possibility of reducing
the BS consumption and adapting it to the amount of
available energy. In this paper, we consider a cluster
of BSs powered with photovoltaic (PV) panels and
equipped with energy storage units. Resource on Demand
(RoD) strategies are implemented to reduce the cluster
energy consumption and to adapt it to energy
availability. The results show that Resource on Demand
can effectively be applied to make off-grid BS
deployment feasible.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fan:2016:BSA,
author = "Xiaoyi Fan and Feng Wang and Jiangchuan Liu",
title = "Boosting Service Availability for Base Stations of
Cellular Networks by Event-driven Battery Profiling",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "88--93",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3004002",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The 3G/4G cellular networks as well as the emerging 5G
have led to an explosive growth on mobile services
across the global markets. Massive base stations have
been deployed to satisfy the demands on service quality
and coverage, and their quantity is only growing in the
foreseeable future. Given the many more base stations
deployed in remote rural areas, maintenance for high
service availability becomes quite challenging. In
particular, they can suffer from frequent power
outages. After such disasters as hurricanes or snow
storms, power recovery can often take several days or
even weeks, during which a backup battery becomes the
only power source. Although power outage is rare in
metropolitan areas, backup batteries are still
necessary for base stations as any service interruption
there can cause unaffordable losses. Given that the
backup battery group installed on a base station is
usually the only power source during power outages, the
working condition of the battery group therefore has a
critical impact on the service availability of a base
station. In this paper, we conduct a systematical
analysis on a real world dataset collected from the
battery groups installed on the base stations of China
Mobile Ltd., and we propose an event-driven battery
profiling approach to precisely extract the features
that cause the working condition degradation of the
battery group. We formulate the prediction models for
both battery voltage and lifetime and propose a series
of solutions to yield accurate outputs. By real world
trace-driven evaluations, we demonstrate that our
approach can boost the cellular network service
availability with an improvement of up to 18.09\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2016:TPE,
author = "Jianyuan Lu and Liang Liu and Jun ``Jim'' Xu and Bin
Liu",
title = "Toward Power-Efficient Backbone Routers",
journal = j-SIGMETRICS,
volume = "44",
number = "2",
pages = "94--99",
month = sep,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3003977.3004003",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Sep 29 16:48:12 MDT 2016",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, a new design framework, called GreenRouter,
has been proposed to reduce the power consumption of
backbone routers. In a GreenRouter, a line card is
partitioned into two functional parts, namely, an
InTerFace (ITF) part that is relatively much lighter
and a Processing Engine (PE) part that is relatively
much heavier, in power consumption. This partitioning
allows ITFs to share the collective processing
capability of PEs, which in turn allows a significant
percentage of PEs to be put into sleep mode (to save
energy) during periods of light link utilizations. In
this paper, we study how ITFs can distribute traffic
flows to the PEs so that the offered loads on all
active PEs are near-perfectly balanced over time, and
kept close to a target load (say 90\%), so that the
number of active PEs can be minimized. Since
GreenRouter's original solution to this problem is quite
crude, we propose a principled solution that has much
lower system overheads and achieves better load
balance. Through both simulation studies and
rigorous analyses, we show our solution can, with high
probability, rapidly restore the near-perfect load
balance among active PEs, after each PE overload
event.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vaze:2016:OBT,
author = "Rahul Vaze and Marceau Coupechoux",
title = "Online Budgeted Truthful Matching",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "3--6",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040232",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An online truthful budgeted matching problem is
considered for a bipartite graph, where the right
vertices are available ahead of time, and individual
left vertices arrive sequentially. On arrival of a left
vertex, its edge utilities (or weights) to all the
right vertices and a corresponding cost (or bid) are
revealed. If a left vertex is matched to any of the
right vertices, then it has to be paid at least as much
as its cost. The problem is to match each left vertex
instantaneously and irrevocably to any one of the right
vertices, if at all, to find the maximum weight
matching that is truthful, under a payment budget
constraint. Truthfulness condition requires that no
left vertex has any incentive of misreporting its cost.
Assuming that the vertices arrive in a uniformly
random order (secretary model) with arbitrary
utilities, a truthful algorithm is proposed that is
$24 \beta$-competitive (where $\beta$ is the ratio of the
maximum and the minimum utility) and satisfies the
payment budget constraint. Direct applications of this
problem include crowdsourcing auctions, and matching
wireless users to cooperative relays in
device-to-device enabled cellular network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lim:2016:CRS,
author = "Yongwhan Lim and Asuman Ozdaglar and Alexander
Teytelboym",
title = "Competitive Rumor Spread in Social Networks",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "7--14",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040233",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a setting in which two firms compete to
spread rumors in a social network. Firms seed their
rumors simultaneously and rumors propagate according to
the linear threshold model. Consumers have (randomly
drawn) heterogeneous thresholds for each product. Using
the concept of cascade centrality introduced by [6], we
provide a sharp characterization of networks in which
games admit pure-strategy Nash equilibria (PSNE). We
provide tight bounds for the efficiency of these
equilibria and for the inequality in firms' equilibrium
payoffs. When the network is a tree, the model is
particularly tractable.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
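For the Lim et al. entry above, the single-rumor linear threshold cascade that the abstract builds on can be sketched as follows. This is an illustrative toy, not code from the cited paper; the graph, edge weights, and uniform thresholds are assumptions, and the paper's competitive two-firm setting is richer.

    # Illustrative sketch of one linear-threshold cascade: a node adopts once
    # the total weight of edges from adopting in-neighbors reaches its
    # randomly drawn threshold in [0, 1].
    import random

    def linear_threshold_cascade(in_neighbors, weight, seeds, rng=random):
        thresholds = {v: rng.random() for v in in_neighbors}
        active = set(seeds)
        changed = True
        while changed:
            changed = False
            for v in in_neighbors:
                if v in active:
                    continue
                pressure = sum(weight[(u, v)] for u in in_neighbors[v] if u in active)
                if pressure >= thresholds[v]:
                    active.add(v)
                    changed = True
        return active

    # Toy example on a 3-node path a -> b -> c with heavy edge weights.
    graph = {"a": [], "b": ["a"], "c": ["b"]}
    w = {("a", "b"): 0.9, ("b", "c"): 0.9}
    print(linear_threshold_cascade(graph, w, seeds={"a"}, rng=random.Random(0)))
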
@Article{Goel:2016:NFC,
author = "Arpit Goel and Vijay Kamble and Siddhartha Banerjee
and Ashish Goel",
title = "Network Formation of Coalition Loyalty Programs",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "15--20",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040234",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harder:2016:TSG,
author = "Reed Harder and Vikrant Vaze",
title = "Two-Stage Game Theoretic Modelling of Airline
Frequency and Fare Competition",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "21--21",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040235",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Airlines make capacity and fare decisions in a
competitive environment. Capacity decisions,
encompassing decisions about frequency of service and
seats-per-flight, affect both the operating costs and
revenues of airlines. These decisions have significant
implications for the performance of the air
transportation system as a whole. Capacity and fare
decisions of different airlines are interdependent,
both serving as tools in an airline's competitive
arsenal. This interdependency motivates a game
theoretic approach to modeling the decision process.
Capacity (especially frequency) decisions are typically
made months in advance of flight departure, with only
an approximate knowledge of what fares will be, while
fare decisions are made weeks to minutes ahead of
flight departure. Several studies have stressed the
need to develop two-stage game theoretic models to
account for the sequential nature of these decisions,
but there are very few analytical, computational, or
empirical results available for such models. In this
article (working paper available at link in [1]), we
develop a two-stage frequency and fare competition
model, demonstrate its tractability across a wide range
of assumptions, and validate its predictions against
observed airline behavior. We take the payoff function
of an airline operating in a set of markets to be the
sum of the differences between revenues and costs in
those markets, with costs as a linear function of
flight frequency. To compute revenue, we explore two
commonly used multinomial logit models of market share.
Frequency decisions are made in the first stage of the
game, while fare decisions are made in the second
stage. We begin our analysis with a simplified version
of this game: two airlines competing in a single
market, with no connecting passengers, infinite seating
capacity, and the absence of a no-fly alternative. Under
these assumptions, for either market share model, we
are able to prove that (1) the second-stage fare game
always has a unique pure strategy Nash equilibrium
(PSNE), (2) first-stage payoffs for each airline are
concave with respect to that airline's frequency
strategy across plausible utility parameter ranges, and
(3) first-stage payoffs for each airline are submodular
functions in the overall frequency strategy space. As
the game is two-player, (3) means that by changing the
sign of one player's strategy space, we can trivially
convert the game into a supermodular game. These
results demonstrate that subgame-perfect PSNE is a
credible and tractable solution concept for our model.
In particular, the existence and uniqueness results
indicate the suitability of PSNE as a solution concept
for the second-stage game. Concave payoffs ensure that
individual first-stage payoff maximization problems are
efficiently solvable, and supermodularity ensures that
several iterative learning dynamics converge to
equilibrium [2]. We then relax each of the assumptions
made in this simplified model by computationally
solving the second stage fare game, generating
equilibrium fare decisions and profits for every set of
frequency decisions for integer daily frequency values
ranging from 1 to 20, for various numbers of players,
seats per flight, and values of utility parameters
(including the no-fly option). Then, we fit quadratic
approximations to these profits as functions of the
frequencies of all players. We find an excellent fit
($ R^2 > 0.9 $) in all cases. Additionally, the signs of
all estimated coefficients are consistent with
submodularity and concavity properties demonstrated
earlier. We show that for an N-player game with such
concave and submodular quadratic payoff functions, the
myopic best response heuristic, where each player
optimizes its payoff against fixed opponent strategies
iteratively, converges to a PSNE. To test the
tractability and predictive validity of our model in
practice, we apply it to a 4-airline, 11-airport
network in the Western U.S., using publicly available
airline operations data. We use the quadratic functions
of airline frequency fitted above and additionally
enforce the aircraft availability constraints, and
solve for equilibrium iteratively using the myopic best
response heuristic. To calibrate the 11 free quadratic
payoff coefficients of this model, we use a stochastic
gradient approximation algorithm to minimize the
absolute errors between observed and predicted
frequency strategies. In practice, the game
converges to equilibrium quickly, and the calibrated
model's frequency predictions approximate observed
behavior both in-sample and out-of-sample, suggesting
that refinements of the model could be pursued for use
in scenario analysis, forecasting, planning, and policy
making.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
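A minimal sketch of the myopic best-response iteration described in the
abstract above, for a two-player game with concave, submodular quadratic
payoffs over integer frequencies 1--20; the payoff coefficients below are
invented for illustration and are not the calibrated values from the paper.

    # Myopic best response on concave quadratic payoffs (illustrative only;
    # coefficients are made up, not the paper's calibrated values).
    import numpy as np

    FREQS = np.arange(1, 21)  # integer daily frequencies 1..20

    def payoff(f_own, f_opp, a=60.0, b=2.0, c=1.5):
        # concave in f_own (negative quadratic term) and submodular in
        # (f_own, f_opp) via the negative cross term
        return a * f_own - b * f_own**2 - c * f_own * f_opp

    def best_response(f_opp):
        return FREQS[np.argmax([payoff(f, f_opp) for f in FREQS])]

    f1, f2 = 1, 20  # arbitrary starting frequencies
    for _ in range(100):
        new_f1 = best_response(f2)
        new_f2 = best_response(new_f1)
        if (new_f1, new_f2) == (f1, f2):
            break
        f1, f2 = new_f1, new_f2
    print("equilibrium frequencies:", f1, f2)

On a 1--20 grid this terminates in a handful of rounds; with aircraft
availability constraints the search over FREQS would simply be restricted to
the feasible set.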
@Article{Hota:2016:STG,
author = "Ashish R. Hota and Harsha Honnappa and Shreyas
Sundaram",
title = "The Superposition-Traffic Game",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "22--25",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040236",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a queuing game where a set of sources or
players strategically send jobs to a single shared
server. The traffic sources have disparate coefficients
of variation of the interarrival times, and the sources
are strategic in their choice of mean inter-arrival
times (or the arrival rates). For every job completion,
each player receives a benefit that potentially depends
on the number of other players using the server
(capturing network effects due to using the same
server). However, the players experience a delay due to
their jobs waiting in the queue. Assuming the service
times have a general distribution with a finite second
moment, we model the delay experienced by the
superposed traffic using a Brownian approximation. In
our first contribution, we show that the total rate of
job arrivals at a Nash equilibrium with n sources is
larger when the sources have heterogeneous coefficients
of variation, while the average delay experienced by a
job is smaller, compared to the equilibrium with an
equal number of homogeneous sources. In the second
contribution, we characterize the equilibrium behavior
of the queuing system when the number of homogeneous
sources scales to infinity in terms of the rate of
growth of the benefits due to network effects.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
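One common heavy-traffic approximation for the mean delay of superposed
renewal traffic at a single server is sketched below; the rate-weighted
aggregation of squared coefficients of variation and the Kingman-style formula
are generic textbook approximations, not necessarily the exact Brownian model
used by the authors, and the parameter values in the example call are
arbitrary.

    # Kingman-style mean waiting time for superposed renewal arrivals at a
    # single server (generic approximation, illustrative parameter values).
    def mean_delay(rates, scv_arrivals, mean_service, scv_service):
        # rates: per-source arrival rates; scv_arrivals: per-source squared
        # coefficients of variation of inter-arrival times
        lam = sum(rates)
        rho = lam * mean_service
        assert rho < 1.0, "queue must be stable"
        # rate-weighted squared CV of the superposed arrival process
        ca2 = sum(r * c for r, c in zip(rates, scv_arrivals)) / lam
        return (rho / (1.0 - rho)) * (ca2 + scv_service) / 2.0 * mean_service

    # two sources with heterogeneous variability, unit-mean service times
    print(mean_delay([0.3, 0.5], [0.5, 2.0], 1.0, 1.0))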
@Article{Reiffers-Masson:2016:TPD,
author = "Alexandre Reiffers-Masson and Eduardo Hargreaves and
Eitan Altman and Wouter Caarls and Daniel S.
Menasch{\'e}",
title = "Timelines are Publisher-Driven Caches: Analyzing and
Shaping Timeline Networks",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "26--29",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040237",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cache networks are one of the building blocks of
information centric networks (ICNs). Most of the recent
work on cache networks has focused on networks of
request-driven caches, which are populated based on
users' requests for content generated by publishers.
However, user-generated content still poses the most
pressing challenges. For such content, timelines are the
de facto sharing solution. In this paper, we establish
a connection between timelines and publisher-driven
caches. We propose simple models and metrics to analyze
publisher-driven caches, allowing for variable-sized
objects. Then, we design two efficient algorithms for
timeline workload shaping, leveraging admission and
price control in order, for instance, to aid service
providers to attain prescribed service level
agreements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shan:2016:SFU,
author = "Y. Shan and C. {Lo Prete} and G. Kesidis and D. J.
Miller",
title = "A simulation framework for uneconomic virtual bidding
in day-ahead electricity markets: Short talk",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "30--30",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "About two thirds of electricity consumers in the
United States are served by Regional Transmission
Organizations (RTOs) and Independent System Operators
(ISOs). One of their primary responsibilities is the
operation of organized auctions for purchasing and
selling electricity that have a two-settlement
structure with coordinated day-ahead (DA) and real-time
(RT) energy markets. The DA market takes place on the
day before the actual power dispatch, and creates a
financial obligation to deliver and withdraw power from
the transmission grid. In contrast, the RT energy
market is a physical market where predicted and actual
supply and demand of electricity are balanced on the
delivery day. Purely financial transactions, known as
virtual bids, were introduced in wholesale electricity
markets to allow participants (including energy traders
that do not control generation assets or serve load) to
exploit arbitrage opportunities arising from expected
price differences between day-ahead and real-time
energy markets and to enhance convergence between DA
and RT prices. More specifically, virtual demand
(supply) bids are financial positions for the purchase
(sale) of energy in the DA market, which are settled
with a countervailing offer to sell (buy) at the RT
price without the bidder taking title to physical
electricity. Virtual demand bids are typically referred
to as DECs, while virtual supply bids are known as
INCs. Virtual bids clear with generation and load bids
in the DA market, and may set the DA market-clearing
price. Virtual bids have strong interactions with other
elements of the electricity market design. For
instance, Financial Transmission Rights (FTRs) are
financial contracts to hedge transmission congestion
between two nodes in the transmission network (a source
and a sink defined in the contract), and entitle their
holders to collect a payment when day-ahead
congestion arises between the source and the sink [1].
Since FTRs settle at the day-ahead prices, virtual bids
could be placed in the day-ahead energy market in order
to affect day-ahead electricity prices in a direction
that enhances the value of the FTRs. In our study, we
consider a model of the DA electricity market at any
node in the network. Market participants include power
generators and loads submitting physical bids, and
financial players placing virtual bids. Virtual bids
affect the DA market clearing prices, but we assume
that they have no impact on RT prices. Theoretical
results on interior Nash equilibria are given, assuming
that virtual bidders can perfectly predict RT prices
and hold no FTRs [2] sinking at the node. We then adopt
a hypergame framework [3] to model the DA market,
assuming imperfect prediction of RT prices by different
virtual bidders. When no market participant holds FTRs,
virtual bidders help achieve convergence between DA and
RT nodal prices, as expected [4]. In this setting, we
also allow one virtual bidder to hold a FTR position
sinking at the node. Our numerical results show that,
with FTR as another source of revenue, the larger the
FTR position, the greater the incentive for the FTR
holder to place uneconomic virtual bids at the FTR sink
to enhance the value of her financial position, in line
with [5, 6]. We also show that the manipulation causes
not only losses for other virtual bidders, but also a
divergence between DA and RT prices. Methods for
detecting such uneconomic bidding are also
investigated. Our technical report is available at
http://www.cse.psu.edu/research/publications/tech-reports/2016/CSE-16-003.pdf.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{LEcuyer:2016:SNN,
author = "Pierre L'Ecuyer and Patrick Maill{\'e} and Nicol{\'a}s
Stier-Moses and Bruno Tuffin",
title = "Search (Non-){Neutrality} and Impact on Innovation:
Short talk",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "31--31",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040239",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ma:2016:PSE,
author = "Qian Ma and Jianwei Huang and Tamer Basar and Ji Liu
and Xudong Chen",
title = "Pricing for Sharing Economy with Reputation",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "32--32",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040240",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gregoire:2016:PHD,
author = "J.-Ch. Gr{\'e}goire and Ang{\`e}le M. Hamel and D.
Marc Kilgour",
title = "Pricing for a Hybrid Delivery Model of Video
Streaming",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "33--36",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040241",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Media streaming for video-on-demand requires a large
initial outlay on server infrastructure. However, a
peer-to-peer system whereby existing customers act as
relays for new customers is a simple way to provide
temporary capacity and gauge demand before committing
to new resources. A customer who agrees to act as a
relay should be provided the service at a discounted
price, but at what price, and is this price affordable
for the content provider? This paper investigates
financial incentives for the hybrid model of video
streaming services.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Antonopoulos:2016:ISP,
author = "Angelos Antonopoulos and Chiara Perillo and Christos
Verikoukis",
title = "{Internet} Service Providers vs. Over-the-Top
Companies: Friends or Foes? --- Short talk",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "37--37",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040242",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The recent appearance of Over-the-Top (OTT) providers,
who offer similar services (e.g., voice and messaging)
to those of the existing Internet Service Providers
(ISPs), sparked a long-standing debate about network
neutrality, i.e., the prioritization of different types
of data in
the network. In particular, ISPs oppose network
neutrality, claiming that OTT companies: (i) have
conflicting interests and provide competitive services,
thus constituting a threat to their own growth, and
(ii) distort incentives for investment, as they
essentially exploit the network already deployed by
ISPs, acting as free riders. The importance of the net
neutrality debate has motivated the research community
to study the interaction among the different tenants
from a theoretical point of view [1,2]. Despite the
interesting theoretical conclusions of the existing
works, an empirical econometric study on the
interaction of the new stakeholders was not possible
hitherto, as the main evolution of the OTT companies
took place at the end of the last decade and,
therefore, real economic data from the actual progress
of these firms were not available until recently. In
this article, we provide a detailed econometric study
to analyze the relationship between the OTT companies
and the ISPs. The empirical analysis has been conducted
for seven countries in the period 2008-2013,
considering ten major ISPs and three OTT companies that
offer communication services (i.e., Skype, Facebook and
WhatsApp), while we focus on five different parameters:
(i) the revenues of the ISPs, (ii) the revenues of the
OTT providers, (iii) the Capital Expenditure (CAPEX) of
the ISPs, (iv) the Internet penetration, and (v) the
real Gross Domestic Product (GDP) that determines the
economic performance of each country. For the analysis
of our cross-sectional time series (countries and year)
panel data, we propose two econometric models (based on
the fixed effects model) with two different dependent
variables: (i) Model A with the ISP revenues as the
dependent variable and (ii) Model B with the OTT
revenues as the dependent variable. The interpretation
of the results of Model A reveals two very intriguing
insights. First, we see that the revenues of the ISPs
and the OTT companies are positively correlated with a
coefficient of 9.81, i.e., an increase of
one unit (e.g., USD) in the revenue of the OTT
providers causes an average increase of approximately
ten units in the revenues of ISPs. Second, the CAPEX of
the ISPs also has a positive effect on their revenue
with a coefficient of 3.21. The positive correlation
between the revenues of the OTT companies and the ISPs
is also verified in Model B with a coefficient of 0.03,
which implies that the growth of ISPs has a positive
(although small) impact on the growth of OTT providers.
However, the most important conclusion that can be
extracted from Model B is the negative impact that the
CAPEX has on the OTT profits. More specifically, the
revenue of the OTT companies is reduced by 0.13 units
for every unit that the ISPs invest in the network
infrastructure. The observations of our empirical
analysis are very important, as they provide tangible
arguments and answers to the claims of the net
neutrality opponents. In particular, our study has
shown that the economic prosperity of the OTT firms has
a positive influence on the financial performance of
the ISPs. Consequently, it can be concluded that these
two important stakeholders fruitfully coexist in the
telecommunications and Internet market and they should
probably work more closely together to achieve a
mutually profitable cooperation. In addition, our
empirical results have also demonstrated that the
network investments have a positive effect on the ISPs'
revenue and a negative impact on the revenue of the OTT
providers, thus refuting the accusations against OTT
companies of free riding. Finally, although not
exhaustive, our study stresses the need for additional
similar studies that will further clarify the
interaction among the different entities in the
evolving Internet ecosystem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
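A minimal sketch of a country-fixed-effects panel regression of the kind
described in the abstract above (Model A, with ISP revenue as the dependent
variable), implemented with country dummies; the file name and column names
are hypothetical placeholders, not the authors' dataset.

    # Fixed-effects (least-squares-dummy-variable) regression of ISP revenue
    # on OTT revenue, CAPEX, Internet penetration and GDP; data and variable
    # names are placeholders.
    import pandas as pd
    import statsmodels.formula.api as smf

    df = pd.read_csv("isp_ott_panel.csv")  # hypothetical panel: country, year, ...
    model_a = smf.ols(
        "isp_revenue ~ ott_revenue + capex + internet_penetration + gdp + C(country)",
        data=df,
    ).fit()
    print(model_a.summary())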
@Article{Xia:2016:HMY,
author = "Chaolun Xia and Saikat Guha and S. Muthukrishnan",
title = "How Much is Your Attention Worth?: Analysis of Prices
in {LinkedIn} Advertising Network --- Short talk",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "38--38",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040243",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Online advertising is one of the pillars in the
Internet industry. An online ad network allows
advertisers to bid on reaching specific audience
through its targeting language. Google AdWords, for
instance, the largest online ad network, allows
advertisers to target audiences based on search terms,
the website (publisher) that the user browses, and
simple user demographics (gender, age group, location).
The price is set by a second-price auction [3]. Other
online ad networks, specifically those run by Facebook,
LinkedIn, Twitter and other Online Social Networks
(OSNs), offer much finer targeting controls. These OSNs
contain detailed information shared directly by the
user. This includes detailed educational records about
the user, past and present employment experience,
significant life events like changes in marital status
or having a baby. This puts OSNs in a position to
offer advertisers significantly more
control in precisely targeting their audience.
LinkedIn, for instance, allows advertisers to target a
software engineer at Microsoft, or a user who has mastered
C++ but works in the medical industry. Facing a variety
of user segments, advertisers need guidance to compile
and tune their ad campaigns. Fortunately, Facebook [1]
and LinkedIn [2] satisfy the core of advertisers' needs
by providing bid suggestions: a function that,
for any targeting condition, shows the suggested bid to
win the auction and the number of users satisfying the
condition. This is exciting because (1) suggested bids
provide an economic view, i.e. the amount of money an
advertiser has to pay to reach their target
audience, and (2) for the first time in the history of
advertising, these prices are now transparent for very
fine characteristics of users. Motivated by this
observation, we study the question of how much the
attention of a user is worth. We approach this problem by
tapping into the bid suggestion function extensively.
We present comprehensive analyses of suggested bids in
LinkedIn with the following contributions. First, we
created a crawler and ran it for more than 100 days. As
a result, we harvest a large dataset consisting of
188,260 suggested bids over 450 distinct targeting
conditions (of 8 common attributes) from LinkedIn.
Second, we present detailed analyses of suggested bids
from LinkedIn. We analyze their temporal and spatial
properties, and investigate their distributions over a
variety of user attributes related to career. We
discover many consistent results of suggested bids from
LinkedIn. They are generally stable over time. The
suggested bids of 50 states in the US negatively
correlate with per capita GDP and income of the states;
the suggested bids of users from different industries
vary a lot, and they negatively correlate with per
capita GDP of the industries. The suggested bids of
users with specific skills are positively correlated
with the demand-to-supply ratios of the skills in the
labor market on LinkedIn. We also observe that users
working for larger companies or holding higher positions
within a company are assigned higher suggested bids.
Detailed results can be found in the working
paper. We also crawled suggested bids from Facebook
[4], and find that the suggested bids from these two
OSNs have a moderate positive correlation. Besides, we
observe that users with high or low income have higher
suggested bids than users with median income. To find
out reasons for the bias, it is interesting to study
this open question: what ads are shown to OSN users
with what attributes? Assuming that the suggested bid
is the actual cost to reach a qualifying user, we study
how advertisers can use these suggested bids
strategically. As future work, we formulate the
targeting problem from the advertisers' point of view. We
show through data analysis that targeting subsets of
users is a viable approach, and then we propose a
greedy algorithm to help advertisers reach up to 40\%
more of their target audience. The above targeting strategy
takes advantage of the arbitrage among the costs to
target different user sets. Although it benefits
advertisers, it might hurt OSNs, e.g. in terms of
revenue. Therefore, another direction for future work is
devising a revenue-maximizing pricing scheme that eliminates
any potential arbitrage from the OSNs' point of view.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
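A minimal sketch of a budget-constrained greedy selection over targeting
segments, in the spirit of the greedy algorithm mentioned in the abstract
above; the segments, suggested bids, audience sets, and the assumption that a
segment costs its suggested bid once are all invented for illustration.

    # Greedy selection of targeting segments: repeatedly pick the segment
    # with the best (new users reached) / (suggested bid) ratio until the
    # budget runs out.  Inputs below are synthetic.
    def greedy_targeting(segments, budget):
        # segments: list of (name, suggested_bid, audience_set)
        chosen, reached, spent = [], set(), 0.0
        remaining = list(segments)
        while remaining:
            def gain(seg):
                return len(seg[2] - reached) / seg[1]
            best = max(remaining, key=gain)
            if gain(best) == 0 or spent + best[1] > budget:
                break
            chosen.append(best[0])
            reached |= best[2]
            spent += best[1]
            remaining.remove(best)
        return chosen, len(reached), spent

    segs = [("sw_eng_seattle", 4.0, set(range(0, 60))),
            ("cpp_medical",    3.0, set(range(40, 90))),
            ("mba_finance",    5.0, set(range(80, 100)))]
    print(greedy_targeting(segs, budget=8.0))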
@Article{Nguyen:2016:PFR,
author = "Thi Thu Han Nguyen and Olivier Brun and Balakrishna J.
Prabhu",
title = "Performance of a fixed reward incentive scheme for
two-hop {DTNs} with competing relays: Short talk",
journal = j-SIGMETRICS,
volume = "44",
number = "3",
pages = "39--39",
month = dec,
year = "2016",
CODEN = "????",
DOI = "https://doi.org/10.1145/3040230.3040244",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 13 07:40:58 MST 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Onderwater:2017:TMI,
author = "Martijn Onderwater and Gerard Hoekstra and Rob van der
Mei",
title = "Throughput Modeling of the {IEEE MAC} for Sensor
Networks",
journal = j-SIGMETRICS,
volume = "44",
number = "4",
pages = "2--9",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3092819.3092821",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 7 17:10:14 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we provide a model for analyzing the
saturation throughput of the IEEE 802.15.4 MAC
protocol, which is the de facto standard for wireless
sensor networks, ensuring fair access to the channel.
To this end, we introduce the concept of a natural
layer, which reflects the time that a sensor node
typically has to wait prior to sending a packet. The
model is simple and provides new insight into how the
throughput depends on the protocol parameters and the
number of nodes in the network. Validation experiments
with simulations demonstrate that the model is highly
accurate for a wide range of parameter settings of the
MAC protocol, and applicable to both large and small
networks. As a byproduct, we discuss fundamental
differences in the protocol stack and corresponding
throughput models of the popular 802.11 standard.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cardellini:2017:OOR,
author = "Valeria Cardellini and Vincenzo Grassi and Francesco
{Lo Presti} and Matteo Nardelli",
title = "Optimal Operator Replication and Placement for
Distributed Stream Processing Systems",
journal = j-SIGMETRICS,
volume = "44",
number = "4",
pages = "11--22",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3092819.3092823",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 7 17:10:14 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Exploiting on-the-fly computation, Data Stream
Processing (DSP) applications are widely used to
process unbounded streams of data and extract valuable
information in a near real-time fashion. As such, they
enable the development of new intelligent and pervasive
services that can improve our everyday life. To keep up
with the high volume of daily produced data, the
operators that compose a DSP application can be
replicated and placed on multiple, possibly
distributed, computing nodes, so as to process the
incoming data flow in parallel. Moreover, to better
exploit the abundance of diffused computational
resources (e.g., Fog computing), recent trends
investigate the possibility of decentralizing the DSP
application placement. In this paper, we present and
evaluate a general formulation of the optimal DSP
replication and placement (ODRP) as an integer linear
programming problem, which takes into account the
heterogeneity of application requirements and
infrastructural resources. We integrate ODRP as a
prototype scheduler in the Apache Storm DSP framework.
By leveraging the DEBS 2015 Grand Challenge as a
benchmark application, we show the benefits of a joint
optimization of operator replication and placement and
how ODRP can optimize different QoS metrics, namely
response time, internode traffic, cost, availability,
and a combination thereof.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gianniti:2017:FPN,
author = "Eugenio Gianniti and Alessandro Maria Rizzi and Enrico
Barbierato and Marco Gribaudo and Danilo Ardagna",
title = "Fluid {Petri} Nets for the Performance Evaluation of
{MapReduce} and {Spark} Applications",
journal = j-SIGMETRICS,
volume = "44",
number = "4",
pages = "23--36",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3092819.3092824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 7 17:10:14 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Big Data applications allow to successfully analyze
large amounts of data not necessarily structured,
though at the same time they present new challenges.
For example, predicting the performance of frameworks
such as Hadoop and Spark can be a costly task, hence
the necessity to provide models that can be a valuable
support for designers and developers. Big Data systems
are becoming a central force in society and the use of
models can also enable the development of intelligent
systems providing Quality of Service (QoS) guarantees
to their users through runtime system reconfiguration.
This paper contributes a novel modeling approach
based on fluid Petri nets to predict the execution time
of MapReduce and Spark applications, which is suitable
for runtime performance prediction.
Models have been validated by an extensive experimental
campaign performed at CINECA, the Italian
supercomputing center, and on the Microsoft Azure
HDInsight data platform. Results show that, on average,
the predictions are within about 9.5\% of the actual
measurements for MapReduce and about 10\% for Spark.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Longo:2017:ARQ,
author = "Francesco Longo and Rahul Ghosh and Vijay K. Naik and
Andrew J. Rindos and Kishor S. Trivedi",
title = "An Approach for Resiliency Quantification of Large
Scale Systems",
journal = j-SIGMETRICS,
volume = "44",
number = "4",
pages = "37--48",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3092819.3092825",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 7 17:10:14 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We quantify the resiliency of large scale systems upon
changes that go beyond the normal system behavior.
Formal definitions for resiliency and change are
provided together with general steps for resiliency
quantification and a set of resiliency metrics that can
be used to quantify the effects of changes. A
formalization of the approach is also shown in the form
of a set of four algorithms that can be applied when
large scale systems are modeled through stochastic
analytic state space models (monolithic models or
interacting sub-models). In particular, in the case of
interacting submodels, since resiliency quantification
involves understanding the transient behavior of the
system, fixed-point variables evolve with time leading
to non-homogeneous Markov chains. To the best of our
knowledge, this is the first paper addressing this problem
in a general way. The proposed approach is applied to
an Infrastructure-as-a-Service (IaaS) Cloud use case.
Specifically, we assess the impact of changes in demand
and available capacity on Cloud resiliency, and we
show that the approach proposed in this paper can scale
to a realistically sized Cloud without significantly
compromising accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Canali:2017:ICP,
author = "Claudia Canali and Riccardo Lancellotti",
title = "Identifying Communication Patterns between Virtual
Machines in Software-Defined Data Centers",
journal = j-SIGMETRICS,
volume = "44",
number = "4",
pages = "49--56",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3092819.3092826",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 7 17:10:14 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
abstract = "Modern cloud data centers typically exploit management
strategies to reduce the overall energy consumption.
While most of the solutions focus on the energy
consumption due to computational elements, the advent
of the Software-Defined Network paradigm opens the
possibility for more complex strategies taking into
account the network traffic exchange within the data
center. However, a network-aware Virtual Machine (VM)
allocation requires the knowledge of data communication
patterns, so that VMs exchanging significant amounts of
data can be placed on the same physical host or on low
cost communication paths. In Infrastructure as a
Service data centers, the information about VMs traffic
exchange is not easily available unless a specialized
monitoring function is deployed over the data center
infrastructure. The main contribution of this paper is
a methodology to infer VM communication patterns
starting from the input/output network traffic time series
of each VM, without relying on special-purpose
monitoring. Our reference scenario is a
software-defined data center hosting a multi-tier
application deployed using horizontal replication. The
proposed methodology has two main goals to support a
network-aware VM allocation: first, to identify
pairs of intensively communicating VMs through
correlation-based analysis of the time series; second,
to identify VMs belonging to the same vertical stack of
a multi-tier application. We evaluate the methodology
by comparing different correlation indexes, clustering
algorithms and time granularities to monitor the
network traffic. The experimental results demonstrate
the capability of the proposed approach to identify
interacting VMs, even in a challenging scenario where
the traffic patterns are similar in every VM belonging
to the same application tier.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
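A minimal sketch of the correlation step of a methodology like the one above:
compare each VM's outbound traffic time series with every other VM's inbound
series and flag strongly correlated pairs. The synthetic series and the
threshold are illustrative only; the paper additionally applies clustering and
compares several correlation indexes and time granularities.

    # Pairwise correlation between VM traffic time series; pairs whose
    # output/input series are strongly correlated are flagged as likely
    # communication partners (synthetic data, illustrative threshold).
    import numpy as np

    rng = np.random.default_rng(0)
    T = 200
    base = rng.normal(size=T)
    traffic_out = {"web1": base + 0.1 * rng.normal(size=T),
                   "db1":  rng.normal(size=T)}
    traffic_in  = {"app1": base + 0.1 * rng.normal(size=T),
                   "db1":  rng.normal(size=T)}

    THRESHOLD = 0.8
    for src, out_series in traffic_out.items():
        for dst, in_series in traffic_in.items():
            if src == dst:
                continue
            corr = np.corrcoef(out_series, in_series)[0, 1]
            if corr > THRESHOLD:
                print(f"likely communicating pair: {src} -> {dst} (r={corr:.2f})")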
@Article{Bianchi:2017:MRB,
author = "Francesco Bianchi and Francesco {Lo Presti}",
title = "A {Markov} Reward based Resource-Latency Aware
Heuristic for the Virtual Network Embedding Problem",
journal = j-SIGMETRICS,
volume = "44",
number = "4",
pages = "57--68",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3092819.3092827",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 7 17:10:14 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
abstract = "An ever increasing use of virtualization in various
emerging scenarios, e.g.: Cloud Computing, Software
Defined Networks, Data Streaming Processing, asks
Infrastructure Providers (InPs) to optimize the
allocation of the virtual network requests (VNRs) into
a substrate network while satisfying QoS requirements.
In this work, we propose MCRM, a two-stage virtual
network embedding (VNE) algorithm with delay and
placement constraints. Our solution revolves around a
novel notion of similarity between virtual and physical
nodes. To this end, taking advantage of Markov Reward
theory, we define a set of metrics for each physical
and virtual node which captures the amount of resources
in a node neighborhood as well as the degree of
proximity among nodes. By defining a notion of
similarity between nodes we then simply map virtual
nodes to the most similar physical node in the
substrate network. We have thoroughly evaluated our
algorithm through simulation. Our experiments show that
MCRM achieves good performance results in terms of
blocking probability and revenues for the InP, as well
as a high and uniform utilization of resources, while
satisfying the delay and placement requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Donatiello:2017:ASL,
author = "Lorenzo Donatiello and Gustavo Marfia",
title = "Analyzing and shaping the lifetime and the performance
of barrier coverage sensor networks",
journal = j-SIGMETRICS,
volume = "44",
number = "4",
pages = "69--79",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3092819.3092828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 7 17:10:14 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we model and provide the means to extend
the lifetime of a barrier coverage sensor network
deployed for target detection. We consider a scenario
where sensors are randomly dropped on a bidimensional
field in order to detect target traversals which occur
in a stochastic way within a critical mission time.
Once a target enters a sensor's detection area, the
sensor transmits such information to a cluster head, in
charge of receiving and retransmitting the messages
received from the sensors deployed on the field. The
contribution of this work is fourfold. We first
identify the sensing nodes whose behavior is key to
model the duration of sensing operations, assuming
prior arrival and mobility models for target
traversals. We then proceed, providing a heuristic
estimation of the traffic received by the cluster head
to quantify its energy requirements, resorting to
specific lifetime definitions. We also evaluate the
relationship between our probabilistic and heuristic
models and the time until the barrier remains capable
of detecting and reporting the traversal of any target
to a sink, as obtained by simulation. Finally, we show
how the lifetime of such a network may be shaped, with
the use of a sequential activation mechanism, for
example to combat the traversals of adversaries
exploiting the lifetime models obtained in this work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pinciroli:2017:CEM,
author = "Riccardo Pinciroli and Salvatore Distefano",
title = "Characterization and Evaluation of {Mobile
Crowdsensing} Performance and Energy Indicators",
journal = j-SIGMETRICS,
volume = "44",
number = "4",
pages = "80--90",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3092819.3092829",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 7 17:10:14 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mobile Crowdsensing (MCS) is a contribution-based
paradigm involving mobiles in pervasive application
deployment and operation, pushed by the ever-growing
and widespread dissemination of personal devices.
Nevertheless, MCS still lacks some key features needed
to become a disruptive paradigm, among them control
over performance and reliability, mainly due to
contribution churning. For mitigating the impact of
churning, several policies such as redundancy,
over-provisioning and checkpointing can be adopted but,
to properly design and evaluate such policies, specific
techniques and tools are required. This paper attempts
to fill this gap by proposing a new technique for the
evaluation of relevant performance and energy figures
of merit for MCS systems. It provides insights into
them from three different perspectives: end users,
contributors, and service providers. Based on queuing
networks (QN), the proposed technique relaxes the
assumptions of existing solutions, allowing a stochastic
characterization of the underlying phenomena through
general, non-exponential distributions. To cope with
contribution churning, it extends the QN semantics
of a service station with a variable number of servers,
implementing proper mechanisms to manage the memory
issues that arise in the underlying process. In this
way, a preliminary validation of the proposed QN model
against an analytic one, and an in-depth investigation
also considering checkpointing, have been performed
through a case study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Totis:2017:OLK,
author = "Niccol{\'o} Totis and Laura Follia and Chiara Riganti
and Francesco Novelli and Francesca Cordero and Marco
Beccuti",
title = "Overcoming the lack of kinetic information in
biochemical reactions networks",
journal = j-SIGMETRICS,
volume = "44",
number = "4",
pages = "91--102",
month = mar,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3092819.3092830",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 7 17:10:14 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A main aspect in computational modelling of biological
systems is the determination of model structure and
model parameters. Due to economical and technical
reasons, only part of these details are well
characterized, while the rest are unknown. To deal with
this difficulty, many reverse engineering and parameter
estimation methods have been proposed in the
literature, however these methods often need an amount
of experimental data not always available. In this
paper we propose an alternative approach, which
overcomes model indetermination solving an Optimization
Problem (OP) with an objective function that, similarly
to Flux Balance Analysis, is derived from an empirical
biological knowledge and does not require large amounts
of data. The system behaviour is described by a set of
Ordinary Differential Equations (ODE). Model
indetermination is resolved by selecting time-varying
coefficients that maximize/minimize the objective
function at each ODE integration step. Moreover, to
facilitate the modelling phase we provide a graphical
formalism, based on Petri Nets, which can be used to
derive the corresponding ODEs and OP. Finally, the
approach is illustrated on a case study focused on
cancer metabolism.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
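A minimal sketch of the general mechanism described above: undetermined
coefficients are chosen at every ODE integration step by solving an
optimization problem, in the spirit of Flux Balance Analysis. The toy
stoichiometry, bounds, and objective are invented to illustrate the mechanism
and do not reproduce the paper's cancer-metabolism case study.

    # Dynamic-FBA-style sketch: at each Euler step an LP picks fluxes v that
    # maximize a "growth" flux subject to steady state (S v = 0) and an
    # uptake bound that depends on the current substrate level.
    import numpy as np
    from scipy.optimize import linprog

    S = np.array([[1.0, -1.0,  0.0],   # toy stoichiometry: 2 internal metabolites,
                  [0.0,  1.0, -1.0]])  # 3 reactions: uptake -> conversion -> growth
    substrate, biomass = 10.0, 0.1
    dt, steps = 0.1, 50

    for _ in range(steps):
        uptake_max = min(2.0, substrate)            # state-dependent bound
        res = linprog(c=[0.0, 0.0, -1.0],           # maximize growth flux v[2]
                      A_eq=S, b_eq=np.zeros(2),
                      bounds=[(0.0, uptake_max), (0.0, 5.0), (0.0, 5.0)],
                      method="highs")
        v = res.x
        substrate = max(substrate - dt * v[0] * biomass, 0.0)
        biomass += dt * v[2] * biomass

    print(f"substrate={substrate:.3f}, biomass={biomass:.3f}")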
@Article{Golubchik:2017:DSM,
author = "Leana Golubchik",
title = "Delay Scalings and Mean-Field Limits in Networked
Systems",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "1--1",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3080572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Load balancing mechanisms and scheduling algorithms
play a critical role in achieving efficient server
utilization and providing robust delay performance in a
wide range of networked systems. We will review some
celebrated schemes and optimality results which
typically assume that detailed state information, e.g.
exact knowledge of queue lengths, is available in
assigning jobs to queues or allocating a shared
resource among competing users. In practice, however,
obtaining such state information is non-trivial, and
usually involves a significant communication overhead
or delay, which is particularly a concern in
large-scale networked systems with massive numbers of
queues. These scalability issues have prompted
increasing attention for the implementation complexity
of load balancing and scheduling algorithms as a
crucial design criterion, besides the traditional
performance metrics. In this talk we examine the delay
performance in such networks for various load balancing
and scheduling algorithms, in conjunction with the
associated implementation overhead. In the first part
of the talk we focus on a scenario with a single
dispatcher where jobs arrive that need to be assigned
to one of several parallel queues. In the second part
of the talk we turn to a system with a single resource,
e.g. a shared wireless transmission medium, which is to
be allocated among several nodes. We will specifically
explore the delay scaling properties in a mean-field
framework where the total load and service capacity
grow large in proportion. The mean-field regime not
only offers analytical tractability, but is also highly
relevant given the immense numbers of servers in data
centers and cloud networks, and dense populations of
wireless devices and sensors in Internet-of-Things
(IoT) applications. Time permitting, we will also
discuss the impact of the underlying network structure
and a few open research challenges.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Avrachenkov:2017:LCA,
author = "Konstantin Avrachenkov and Jasper Goseling and Berksan
Serbetci",
title = "A Low-Complexity Approach to Distributed Cooperative
Caching with Geographic Constraints",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "2--2",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A promising means to increase efficiency of cellular
networks compared to existing architectures is to
proactively cache data in the base stations. The idea
is to store part of the data at the wireless edge and
use the backhaul only to refresh the stored data. Data
replacement will depend on the users' demand
distribution over time. As this distribution is varying
slowly, the stored data can be refreshed at off-peak
times. In this way, caches containing popular content
serve as helpers to the overall system and decrease the
maximum backhaul load [1-5]. Our goal in this paper is
to develop low-complexity distributed and
asynchronous content placement algorithms. This is of
practical relevance in cellular networks in which an
operator wants to optimize the stored content in caches
(i.e., base stations) while keeping the communication
in the network to a minimum. In that case it will help
that caches exchange information only locally.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:2017:OSE,
author = "Debankur Mukherjee and Souvik Dhara and Sem C. Borst
and Johan S. H. van Leeuwaarden",
title = "Optimal Service Elasticity in Large-Scale Distributed
Systems",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "3--3",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078532",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A fundamental challenge in large-scale cloud networks
and data centers is to achieve highly efficient server
utilization and limit energy consumption, while
providing excellent user-perceived performance in the
presence of uncertain and time-varying demand patterns.
Auto-scaling provides a popular paradigm for
automatically adjusting service capacity in response to
demand while meeting performance targets, and
queue-driven auto-scaling techniques have been widely
investigated in the literature. In typical data center
architectures and cloud environments however, no
centralized queue is maintained, and load balancing
algorithms immediately distribute incoming tasks among
parallel queues. In these distributed settings with
vast numbers of servers, centralized queue-driven
auto-scaling techniques involve a substantial
communication overhead and major implementation burden,
or may not even be viable at all. Motivated by the
above issues, we propose a joint auto-scaling and load
balancing scheme which does not require any global
queue length information or explicit knowledge of
system parameters, and yet provides provably
near-optimal service elasticity. We establish the
fluid-level dynamics for the proposed scheme in a
regime where the total traffic volume and nominal
service capacity grow large in proportion. The
fluid-limit results show that the proposed scheme
achieves asymptotic optimality in terms of
user-perceived delay performance as well as energy
consumption. Specifically, we prove that both the
waiting time of tasks and the relative energy portion
consumed by idle servers vanish in the limit. At the
same time, the proposed scheme operates in a
distributed fashion and involves only constant
communication overhead per task, thus ensuring
scalability in massive data center operations.
Extensive simulation experiments corroborate the
fluid-limit results, and demonstrate that the proposed
scheme can match the user performance and energy
consumption of state-of-the-art approaches that do take
full advantage of a centralized queue.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gong:2017:QPS,
author = "Long Gong and Paul Tune and Liang Liu and Sen Yang and
Jun (Jim) Xu",
title = "Queue-Proportional Sampling: a Better Approach to
Crossbar Scheduling for Input-Queued Switches",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "4--4",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078509",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most present day switching systems, in Internet
routers and data-center switches, employ a single
input-queued crossbar to interconnect input ports with
output ports. Such switches need to compute a matching,
between input and output ports, for each switching
cycle (time slot). The main challenge in designing such
matching algorithms is to deal with the unfortunate
tradeoff between the quality of the computed matching
and the computational complexity of the algorithm. In
this paper, we propose a general approach that can
significantly boost the performance of both SERENA and
iSLIP, yet incurs only $ O(1) $ additional
computational complexity at each input/output port. Our
approach is a novel proposing strategy, called
Queue-Proportional Sampling (QPS), that generates an
excellent starter matching. We show, through rigorous
simulations, that when starting with this starter
matching, iSLIP and SERENA can output much better final
matching decisions, as measured by the resulting
throughput and delay performance, than they otherwise
can.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
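A minimal sketch of the queue-proportional proposing step named in the
abstract above: each input port samples an output with probability
proportional to the corresponding virtual output queue (VOQ) length, and
non-conflicting proposals form the starter matching. Conflict resolution is
simplified to first-come-first-kept, an illustrative shortcut rather than the
paper's exact rule.

    # Queue-proportional sampling of a starter matching for an
    # input-queued switch.
    import random

    def qps_starter_matching(voq):
        # voq[i][j] = queue length at input i for output j
        n = len(voq)
        taken_outputs, matching = set(), {}
        for i in range(n):
            total = sum(voq[i])
            if total == 0:
                continue
            # sample an output with probability proportional to voq[i][j]
            j = random.choices(range(n), weights=voq[i])[0]
            if j not in taken_outputs:   # keep only non-conflicting proposals
                matching[i] = j
                taken_outputs.add(j)
        return matching

    voq = [[3, 0, 1],
           [0, 2, 2],
           [5, 0, 0]]
    print(qps_starter_matching(voq))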
@Article{Ju:2017:HLS,
author = "Xiaoen Ju and Hani Jamjoom and Kang G. Shin",
title = "{Hieroglyph}: Locally-Sufficient Graph Processing via
Compute-Sync-Merge",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "5--5",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078589",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mainstream graph processing systems (such as Pregel
[3] and PowerGraph [1]) follow the bulk synchronous
parallel model. This design leads to the tight coupling
of computation and communication, where no vertex can
proceed to the next iteration of computation until all
vertices have been processed in the current iteration
and graph states have been synchronized across all
hosts. This coupling of computation and communication
incurs a significant performance penalty. Fully
decoupling computation from communication requires (i)
restricted access to only local state during
computation and (ii) independence of inter-host
communication from computation. We call the combination
of both conditions local sufficiency. Local sufficiency
is not efficiently supported by the state of the art.
Synchronous systems, by design, do not support local
sufficiency due to their intrinsic
computation-communication coupling. Even systems that
implement asynchronous execution only partially achieve
local sufficiency. For example, PowerGraph's
asynchronous mode satisfies local sufficiency by
distributed scheduling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2017:SYE,
author = "Lingda Li and Robel Geda and Ari B. Hayes and Yanhao
Chen and Pranav Chaudhari and Eddy Z. Zhang and Mario
Szegedy",
title = "A Simple Yet Effective Balanced Edge Partition Model
for Parallel Computing",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "6--6",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078520",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Graph edge partition models have recently become an
appealing alternative to graph vertex partition models
for distributed computing due to both their flexibility
in balancing loads and their performance in reducing
communication cost. In this paper, we propose a simple
yet effective graph edge partitioning algorithm. In
practice, our algorithm provides good partition quality
while maintaining low partition overhead. It also
outperforms similar state-of-the-art edge partition
approaches, especially for power-law graphs. In theory,
previous work showed that an approximation guarantee of
$ O(d_{\rm max} \sqrt {\log n \log k}) $ applies to
graphs with $ m = \Omega (k^2) $ edges ($n$ is the
number of vertices, and $k$ is the number of
partitions). We further rigorously prove that this
approximation guarantee holds for all graphs. We also
demonstrate the applicability of the proposed edge
partition algorithm in real parallel computing systems.
We draw our example from GPU program locality
enhancement and demonstrate that the graph edge
partition model applies not only to distributed
computing with many computer nodes, but also to
parallel computing in a single computer node with a
many-core processor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
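A minimal sketch of a greedy balanced edge partitioner in the spirit of the
algorithm above: each edge is assigned to a partition that already touches its
endpoints (to limit vertex replication), subject to a soft load cap. The
scoring rule and the 10% imbalance allowance are simplifications, not the
authors' exact algorithm.

    # Greedy balanced edge partitioning: place each edge in the
    # least-replicating partition among those under the load cap.
    from collections import defaultdict

    def greedy_edge_partition(edges, k):
        cap = len(edges) / k * 1.1                 # 10% imbalance allowance
        load = [0] * k
        vertex_parts = defaultdict(set)            # partitions each vertex touches
        assignment = {}
        for (u, v) in edges:
            def score(p):
                # prefer partitions already containing u or v (fewer new
                # replicas), break ties by lighter load
                return (-(p in vertex_parts[u]) - (p in vertex_parts[v]), load[p])
            candidates = [p for p in range(k) if load[p] < cap] or list(range(k))
            best = min(candidates, key=score)
            assignment[(u, v)] = best
            load[best] += 1
            vertex_parts[u].add(best)
            vertex_parts[v].add(best)
        return assignment

    edges = [(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 2)]
    print(greedy_edge_partition(edges, k=2))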
@Article{Cohen:2017:OCS,
author = "Maxime C. Cohen and Philipp Keller and Vahab Mirrokni
and Morteza Zadimoghaddam",
title = "Overcommitment in Cloud Services: Bin packing with
Chance Constraints",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "7--7",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078530",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers a traditional problem of resource
allocation, scheduling jobs on machines. One such
recent application is cloud computing, where jobs
arrive in an online fashion with capacity requirements
and need to be immediately scheduled on physical
machines in data centers. It is often observed that the
requested capacities are not fully utilized, hence
offering an opportunity to employ an overcommitment
policy, i.e., selling resources beyond capacity.
Setting the right overcommitment level can induce a
significant cost reduction for the cloud provider,
while only inducing a very low risk of violating
capacity constraints. We introduce and study a model
that quantifies the value of overcommitment by modeling
the problem as a bin packing with chance constraints.
We then propose an alternative formulation that
transforms each chance constraint into a submodular
function. We show that our model captures the risk
pooling effect and can guide scheduling and
overcommitment decisions. We also develop a family of
online algorithms that are intuitive, easy to implement
and provide a constant factor guarantee from optimal.
Finally, we calibrate our model using realistic
workload data, and test our approach in a practical
setting. Our analysis and experiments illustrate the
benefit of overcommitment in cloud services, and
suggest a cost reduction of 1.5\% to 17\% depending on
the provider's risk tolerance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Quach:2017:ILT,
author = "Alan Quach and Zhongjie Wang and Zhiyun Qian",
title = "Investigation of the {2016 Linux TCP} Stack
Vulnerability at Scale",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "8--8",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078510",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To combat blind in-window attacks against TCP, changes
proposed in RFC 5961 have been implemented by Linux
since late 2012. While successfully eliminating the old
vulnerabilities, the new TCP implementation was
reported in August 2016 to have introduced a subtle yet
serious security flaw. Assigned CVE-2016-5696, the flaw
exploits the challenge ACK rate limiting feature that
could allow an off-path attacker to infer the
presence/absence of a TCP connection between two
arbitrary hosts, terminate such a connection, and even
inject malicious payload. In this work, we perform a
comprehensive measurement of the impact of the new
vulnerability. This includes (1) tracking the
vulnerable Internet servers, (2) monitoring the patch
behavior over time, and (3) picturing the overall security
status of TCP stacks at scale. Towards this goal, we
design a scalable measurement methodology to scan the
Alexa top 1 million websites for almost 6 months. We
also present how notifications impact the patching
behavior, and compare the result with the Heartbleed
and the Debian PRNG vulnerability. The measurement
represents a valuable data point in understanding how
Internet servers react to serious security flaws in the
operating system kernel.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2017:CMP,
author = "Brandon Wang and Xiaoye Li and Leandro P. de Aguiar
and Daniel S. Menasche and Zubair Shafiq",
title = "Characterizing and Modeling Patching Practices of
Industrial Control Systems",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "9--9",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078524",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Industrial Control Systems (ICS) are widely deployed
in mission critical infrastructures such as
manufacturing, energy, and transportation. The mission
critical nature of ICS devices poses important security
challenges for ICS vendors and asset owners. In
particular, the patching of ICS devices is usually
deferred to scheduled production outages so as to
prevent potential operational disruption of critical
systems. In this paper, we present the results from our
longitudinal measurement and characterization study of
ICS patching behavior. Our analysis of more than 100
thousand Internet-exposed ICS devices reveals that
fewer than 30\% upgrade to newer patched versions
within 60 days of a vulnerability disclosure. Based on
our measurement and analysis, we further propose a
model to forecast the patching behavior of ICS
devices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2017:SGN,
author = "Sinong Wang and Ness Shroff",
title = "Security Game with Non-additive Utilities and Multiple
Attacker Resources",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "10--10",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078519",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has been significant interest in studying
security games for modeling the interplay of attacks
and defenses on various systems involving critical
infrastructure, financial system security, political
campaigns, and civil safeguarding. However, existing
security game models typically either assume additive
utility functions, or that the attacker can attack only
one target. Such assumptions lead to tractable
analysis, but miss key inherent dependencies that exist
among different targets in current complex networks. In
this paper, we generalize the classical security game
models to allow for non-additive utility functions. We
also allow attackers to be able to attack multiple
targets. We examine such a general security game from a
theoretical perspective and provide a unified view. In
particular, we show that each security game is
equivalent to a combinatorial optimization problem over
a set system $ \epsilon $, which consists of the defender's
pure strategy space. The key technique we use is based
on the transformation, projection of a polytope, and
the ellipsoid method. This work settles several open
questions in security game domain and extends the
state-of-the-art of both the polynomial solvable and
NP-hard class of the security game.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Braverman:2017:FMB,
author = "Anton Braverman and J. G. Dai and Xin Liu and Lei
Ying",
title = "Fluid-Model-Based Car Routing for Modern Ridesharing
Systems",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "11--12",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078595",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a closed queueing network model of
ridesharing systems such as Didi Chuxing, Lyft, and
Uber. We focus on empty-car routing, a mechanism by
which we control car flow in the network to optimize
system-wide utility functions, e.g. the availability of
empty cars when a passenger arrives. We establish both
process-level and steady-state convergence of the
queueing network to a fluid limit in a large market
regime where demand for rides and supply of cars tend
to infinity, and use this limit to study a fluid-based
optimization problem. We prove that the optimal network
utility obtained from the fluid-based optimization is
an upper bound on the utility in the finite car system
for any routing policy, both static and dynamic, under
which the closed queueing network has a stationary
distribution. This upper bound is achieved
asymptotically under the fluid-based optimal routing
policy. Simulation results with real-world data released
by Didi Chuxing demonstrate that the utility under the
fluid-based optimal routing policy converges to the
upper bound with a rate of $ 1 / \sqrt {N} $, where $N$
is the number of cars in the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kuhnle:2017:PSA,
author = "Alan Kuhnle and Tianyi Pan and Victoria G. Crawford
and Md Abdul Alim and My T. Thai",
title = "Pseudo-Separation for Assessment of Structural
Vulnerability of a Network",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "13--14",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Based upon the idea that network functionality is
impaired if two nodes in a network are sufficiently
separated in terms of a given metric, we introduce two
combinatorial pseudocut problems generalizing the
classical min-cut and multi-cut problems. We expect the
pseudocut problems will find broad relevance to the
study of network reliability. We comprehensively
analyze the computational complexity of the pseudocut
problems and provide three approximation algorithms for
these problems. Motivated by applications in
communication networks with strict Quality-of-Service
(QoS) requirements, we demonstrate the utility of the
pseudocut problems by proposing a targeted
vulnerability assessment for the structure of
communication networks using QoS metrics; we perform
experimental evaluations of our proposed approximation
algorithms in this context.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Deng:2017:CRA,
author = "Han Deng and I-Hong Hou",
title = "On the Capacity Requirement for Arbitrary End-to-End
Deadline and Reliability Guarantees in Multi-hop
Networks",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "15--16",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078540",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It has been shown that it is impossible to achieve
both stringent end-to-end deadline and reliability
guarantees in a large network without having complete
information of all future packet arrivals. In order to
maintain desirable performance in the presence of
uncertainty of future packet arrivals, common practice
is to add redundancy by increasing link capacities.
This paper studies the amount of capacity needed to
provide stringent performance guarantees and proposes a
low-complexity online algorithm. Without adding
redundancy, we further propose a low-complexity
order-optimal online policy for the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2017:OSE,
author = "Maotong Xu and Sultan Alamro and Tian Lan and Suresh
Subramaniam",
title = "Optimizing Speculative Execution of Deadline-Sensitive
Jobs in Cloud",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "17--18",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we bring various speculative scheduling
strategies together under a unifying optimization
framework, which defines a new metric, Probability of
Completion before Deadlines (PoCD), to measure the
probability that MapReduce jobs meet their desired
deadlines. We propose an optimization problem to
jointly optimize PoCD and execution cost in different
strategies. Three strategies are prototyped on Hadoop
MapReduce and evaluated against two baseline strategies
using experiments. A 78\% net utility increase with up
to 94\% PoCD and 12\% cost improvement is achieved.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Islam:2017:SCM,
author = "Mohammad A. Islam and Xiaoqi Ren and Shaolei Ren and
Adam Wierman",
title = "A Spot Capacity Market to Increase Power
Infrastructure Utilization in Multi-Tenant Data
Centers",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "19--20",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite the common practice of oversubscription, power
capacity is largely under-utilized in data centers. A
significant factor driving this under-utilization is
fluctuation of the aggregate power demand, resulting in
unused ``spot (power) capacity''. In this paper, we tap
into spot capacity for improving power infrastructure
utilization in multi-tenant data centers, an important
but under-explored type of data center where multiple
tenants house their own physical servers. We propose a
novel spot capacity market, called SpotDC, to allocate
spot capacity to tenants on demand. Specifically,
SpotDC extracts tenants' rack-level spot capacity
demand through an elastic demand function, based on
which the operator sets the market price for spot
capacity allocation. We evaluate SpotDC using both
testbed experiments and simulations, demonstrating that
SpotDC improves power infrastructure utilization and
creates a ``win-win'' situation: the data center
operator increases its profit (by nearly 10\%), while
tenants improve their performance (by 1.2--1.8$ \times
$ on average, yet at a marginal cost).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2017:HAO,
author = "Lin Yang and Mohammad H. Hajiesmaili and Hanling Yi
and Minghua Chen",
title = "Hour-Ahead Offering Strategies in Electricity Market
for Power Producers with Storage and Intermittent
Supply",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "21--22",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes online offering strategies for a
storage-assisted renewable power producer that
participates in the hour-ahead electricity market. The
online strategy determines the offering price and
volume, while no exact or stochastic future information
is available in a time-coupled setting in the presence
of the storage. The proposed online strategy achieves
the best possible competitive ratio of $ O(\log \theta)
$, where $ \theta $ is the ratio between the maximum
and minimum clearing prices. Trace-driven experiments
demonstrate that the proposed strategy achieves
close-to-optimal performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gao:2017:WSL,
author = "Xing Gao and Zhang Xu and Haining Wang and Li Li and
Xiaorui Wang",
title = "Why {``Some''} Like It Hot Too: Thermal Attack on Data
Centers",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "23--24",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A trend in modern data centers is to raise the
temperature and maintain all servers in a relatively
hot environment. While this can save on cooling costs
given benign workloads running in servers, the hot
environment increases the risk of cooling failure. In
this work, we introduce the security concept of thermal
attack on a data center that exploits thermal-intensive
workloads to severely worsen the thermal conditions in
the data center. To unveil the vulnerability of a data
center to thermal attacks, we conduct thermal
measurements and propose effective thermal attack
vectors. To evaluate the impacts of thermal attacks
inside a data center, we simulate datacenter-level
thermal attacks using a real-world data center trace.
Our evaluation demonstrates that thermal attacks can
cause local hotspots and, even worse, lead to cooling
failures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Comden:2017:IRD,
author = "Joshua Comden and Zhenhua Liu and Yue Zhao",
title = "Incentivizing Reliable Demand Response with Customers'
Uncertainties and Capacity Planning",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "25--26",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the major issues with the integration of
renewable energy sources into the power grid is the
increased uncertainty and variability that they bring.
If this uncertainty is not sufficiently addressed, it
will limit the further penetration of renewables into
the grid and even result in blackouts. Compared to
energy storage, Demand Response (DR) has advantages to
provide reserves to the load serving entities (LSEs) in
a cost-effective and environmentally friendly way. DR
programs work by changing customers' loads when the
power grid experiences a contingency such as a mismatch
between supply and demand. Uncertainties from both the
customer-side and LSE-side make designing algorithms
for DR a major challenge. This paper makes the
following main contributions: (i) We propose DR control
policies based on the optimal structures of the offline
solution. (ii) A distributed algorithm is developed for
implementing the control policies without efficiency
loss. (iii) We further offer an enhanced policy design
by allowing flexibilities into the commitment level.
(iv) We perform real-world trace-based numerical
simulations which demonstrate that the proposed
algorithms can achieve near optimal social cost.
Details can be found in our extended version.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jadidi:2017:SPP,
author = "Amin Jadidi and Mohammad Arjomand and Mahmut Kandemir
and Chita Das",
title = "A Study on Performance and Power Efficiency of Dense
Non-Volatile Caches in Multi-Core Systems",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "27--28",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a novel cache design based on
Multi-Level Cell Spin-Transfer Torque RAM (MLC
STT-RAM). Our design exploits the asymmetric nature of
the MLC STT-RAM to build cache lines featuring
heterogeneous performance: half of the cache lines are
read-friendly, while the other half are write-friendly.
This asymmetry in read/write latencies is then used by
a migration policy in order to overcome the high
latency of the baseline MLC cache. Furthermore, in
order to enhance the device lifetime, we propose to
dynamically deactivate ways of a set in underutilized
sets to convert MLC to Single-Level Cell (SLC) mode.
Our experiments show that our design gives an average
improvement of 12\% in system performance and 26\% in
last-level cache (L3) access energy for various
workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shafiee:2017:SCD,
author = "Mehrnoosh Shafiee and Javad Ghaderi",
title = "Scheduling Coflows in Datacenter Networks: Improved
Bound for Total Weighted Completion Time",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "29--30",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Coflow is a recently proposed networking abstraction
to capture communication patterns in data-parallel
computing frameworks. We consider the problem of
efficiently scheduling coflows with release dates in a
shared datacenter network so as to minimize the total
weighted completion time of coflows. Specifically, we
propose a randomized algorithm with approximation ratio
of $ 3 e \approx 8.155 $, which improves the prior best
known ratio of $ 9 + 16 \sqrt {2 / 3} \approx 16.542 $.
For the special case when all coflows are released at
time zero, we obtain a randomized algorithm with
approximation ratio of $ 2 e \approx 5.436 $, which
improves the prior best known ratio of $ 3 + 2 \sqrt 2
\approx 5.828 $. Simulation results using a real
traffic trace are presented that show improvement over
the prior approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xiong:2017:CFG,
author = "Qin Xiong and Fei Wu and Zhonghai Lu and Yue Zhu and
You Zhou and Yibing Chu and Changsheng Xie and Ping
Huang",
title = "Characterizing {$3$D} Floating Gate {NAND} Flash",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "31--32",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we characterize a state-of-the-art 3D
floating gate NAND flash memory through comprehensive
experiments on an FPGA platform. Then, we present
distinct observations on performance and reliability,
such as operation latencies and various error patterns.
We believe that through our work, novel 3D NAND
flash-oriented designs can be developed to achieve
better performance and reliability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lim:2017:EMP,
author = "Yeon-sup Lim and Erich M. Nahum and Don Towsley and
Richard J. Gibbens",
title = "{ECF}: an {MPTCP} Path Scheduler to Manage
Heterogeneous Paths",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "33--34",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multi-Path TCP (MPTCP) is a new standardized transport
protocol that enables devices to utilize multiple
network interfaces. The default MPTCP path scheduler
prioritizes paths with the smallest round trip time
(RTT). In this work, we examine whether the default
MPTCP path scheduler can provide applications the ideal
aggregate bandwidth, i.e., the sum of available
bandwidths of all paths. Our experimental results show
that heterogeneous paths cause under-utilization of the
fast path, resulting in undesirable application
behaviors such as lower video streaming quality than
can be obtained using the available aggregate
bandwidth. To solve this problem, we propose and
implement a new MPTCP path scheduler, ECF (Earliest
Completion First), that utilizes all relevant
information about a path, not just RTT. Our results
show that ECF consistently utilizes all available paths
more efficiently than other approaches under path
heterogeneity, particularly for streaming video.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aktas:2017:SQH,
author = "Mehmet Fatih Aktas and Elie Najm and Emina Soljanin",
title = "Simplex Queues for Hot-Data Download",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "35--36",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In distributed systems, reliable data storage is
accomplished through redundancy, which has
traditionally been achieved by simple replication of
data across multiple nodes [6]. A special class of
erasure codes, known as locally repairable codes (LRCs)
[7], has started to replace replication in practice
[8], as a more storage-efficient way to provide a
desired reliability. It has recently been recognized
that storage redundancy can also provide fast access to
stored data (see e.g. [5,9,10] and references therein).
Most of these papers consider download scenarios of all
jointly encoded pieces of data, and very few [11,12,14]
are concerned with download of only some, possibly hot,
pieces of data that are jointly encoded with those of
less interest. So far, only the low-traffic regime has
been partially addressed. In this paper, we are concerned
with hot data download from systems implementing a
special class of locally repairable codes, known as
LRCs with availability [13,15]. We consider simplex
codes, a particular subclass of LRCs with availability,
because (1) they are in a certain sense optimal [2] and
(2) they are minimally different from replication.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singh:2017:EAF,
author = "Siddharth Singh and Vedant Nanda and Rijurekha Sen and
Sohaib Ahmad and Satadal Sengupta and Amreesh Phokeer
and Zaid Ahmed Farooq and Taslim Arefin Khan and
Ponnurangam Kumaragaguru and Ihsan Ayyub Qazi and David
Choffnes and Krishna P. Gummadi",
title = "An Empirical Analysis of {Facebook}'s Free Basics",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "37--38",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mena:2017:MTV,
author = "Jorge Mena and Peter Bankole and Mario Gerla",
title = "Multipath {TCP} on a {VANET}: a Performance Study",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "39--40",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078555",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Highly dynamic vehicular networks use long-range radio
technologies such as DSRC, WiMAX, and Cellular networks
to maintain connectivity. Multipath TCP offers the
possibility to combine these radio technologies to
improve network performance, allow robust handoffs, and
maintain vehicle connectivity at all times. The
proliferation of mobile devices with dual interfaces
and the manufacturers' interest to make their vehicles
smarter and more competitive create the ideal scenario
for MPTCP on VANETs. In this paper, we study the
performance of MPTCP on two VANET scenarios:
Vehicle-to-Infrastructure (V2I), and
Vehicle-to-Vehicle (V2V), under distinct velocities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2017:FSD,
author = "Ye Yu and Djamal Belazzougui and Chen Qian and Qin
Zhang",
title = "A Fast, Small, and Dynamic Forwarding Information
Base",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "41--42",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Concise is a Forwarding information base (FIB) design
that uses very little memory to support fast query of a
large number of dynamic network names or flow IDs.
Concise makes use of minimal perfect hashing and the
SDN framework to design and implement the data
structure, protocols, and system. Experimental results
show that Concise uses significantly less memory to
achieve faster query speed compared to existing FIB
solutions and it can be updated very efficiently.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:2017:HHF,
author = "Ning Wu and Yingjie Bi and Nithin Michael and Ao Tang
and John Doyle and Nikolai Matni",
title = "{HFTraC}: High-Frequency Traffic Control",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "43--44",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose high-frequency traffic control (HFTraC), a
rate control scheme that coordinates the transmission
rates and buffer utilizations in routers network-wide
at fast timescale. HFTraC can effectively deal with
traffic demand fluctuation by utilizing available
buffer space in routers network-wide, and therefore
lead to significant performance improvement in terms of
tradeoff between bandwidth utilization and queueing
delay. We further note that the performance limit of
HFTraC is determined by the network architecture used
to implement it. We provide trace-driven evaluation of
the performance of HFTraC implemented in the proposed
architectures that vary from fully centralized to
completely decentralized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Basu:2017:ATB,
author = "Soumya Basu and Aditya Sundarrajan and Javad Ghaderi
and Sanjay Shakkottai and Ramesh Sitaraman",
title = "Adaptive {TTL}-Based Caching for Content Delivery",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "45--46",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Content Delivery Networks (CDNs) cache and serve a
majority of the user-requested content on the Internet,
including web pages, videos, and software downloads. We
propose two TTL-based caching algorithms that
automatically adapt to the heterogeneity, burstiness,
and non-stationary nature of real-world content
requests. The first algorithm called d-TTL dynamically
adapts a TTL parameter using a stochastic approximation
approach and achieves a given feasible target hit rate.
The second algorithm called f-TTL uses two caches, each
with its own TTL. The lower-level cache adaptively
filters out non-stationary content, while the
higher-level cache stores frequently-accessed
stationary content. We implement d-TTL and f-TTL and
evaluate both algorithms using an extensive nine-day
trace consisting of more than 500 million requests from
a production CDN server. We show that both d-TTL and
f-TTL converge to their hit rate targets with an error
of about 1.3\%. We also show that f-TTL requires a
significantly smaller cache size than d-TTL to achieve
the same hit rate, since it effectively filters out
rarely-accessed content.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mirrokni:2017:OOM,
author = "Vahab Mirrokni",
title = "Online Optimization for Markets and the Cloud: Theory
and Practice",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "47--48",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078507",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Internet applications provide interesting dynamic
environments for online optimization techniques. In
this talk, I will discuss a number of such problems in
the context of online markets, and in serving cloud
services. For online markets, I discuss problems in
online advertising. Online ads are delivered in a
real-time fashion under uncertainty in an environment
with strategic agents. Making such real-time (or
online) decisions without knowing the future is
challenging for repeated auctions. In this context, I
will first highlight the practical importance of
considering ``hybrid'' models that can take advantage
of forecasting, and at the same time, are robust
against adversarial changes in the input. In
particular, I discuss our recent results combining
stochastic and adversarial input models. Then I will
present more recent results concerning online bundling
schemes that can be applied to repeated auction
environments. In this part, I discuss ideas from our
recent papers about online bundling, stateful pricing,
bank account mechanisms, and Martingale auctions. For
problems on the cloud, I will touch upon two online
load balancing problems: one in the context of
consistent hashing with bounded loads for dynamic
environments, and one in the context of
multi-dimensional load balancing. Other than presenting
theoretical results on these topics, we show how some
of our new algorithmic techniques have been applied by
Google and other companies, and confirm their
significance in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ying:2017:SMM,
author = "Lei Ying",
title = "{Stein}'s Method for Mean-Field Approximations in
Light and Heavy Traffic Regimes",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "49--49",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078592",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mean-field analysis is an analytical method for
understanding large-scale stochastic systems such as
large-scale data centers and communication networks.
The idea is to approximate the stationary distribution
of a large-scale stochastic system using the
equilibrium point (called the mean-field limit) of a
dynamical system (called the mean-field model). This
approximation is often justified by proving the weak
convergence of stationary distributions to its
mean-field limit. Most existing mean-field models
concerned the light-traffic regime where the load of
the system, denoted by $ \rho $, is strictly less than
one and is independent of the size of the system. This
is because a traditional mean-field model represents
the limit of the corresponding stochastic system.
Therefore, the load of the mean-field model is $ \rho =
\lim_{N \to \infty } \rho^{(N)} $, where $ \rho^{(N)} $
is the load of the stochastic system of size $N$. Now
if $ \rho^{(N)} \to 1$ as $ N \to \infty $ (i.e., in
the heavy-traffic regime), then $ \rho = 1.$ For most
systems, the mean-field limits when $ \rho = 1$ are
trivial and meaningless. To overcome this difficulty of
traditional mean-field models, this paper takes a
different point of view on mean-field models. Instead
of regarding a mean-field model as the limiting system
of large-scale stochastic system, it views the
equilibrium point of the mean-field model, called a
mean-field solution, simply as an approximation of the
stationary distribution of the finite-size system.
Therefore both mean-field models and solutions can be
functions of $N$. The proposed method focuses on
quantifying the approximation error. If the
approximation error is small (as we will show in two
applications), then we can conclude that the mean-field
solution is a good approximation of the stationary
distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2017:EVE,
author = "Nicolas Gast",
title = "Expected Values Estimated via Mean-Field Approximation
are {$ 1 / N $}-Accurate: Extended Abstract",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "50--50",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078523",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study the accuracy of mean-field
approximation. We show that, under general conditions,
the expectation of any performance functional converges
at rate $ O(1 / N) $ to its mean-field approximation.
Our result applies for finite and infinite-dimensional
mean-field models. We provide numerical experiments
that demonstrate that this rate of convergence is
tight.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sun:2017:ASM,
author = "Wen Sun and V{\'e}ronique Simon and S{\'e}bastien
Monnet and Philippe Robert and Pierre Sens",
title = "Analysis of a Stochastic Model of Replication in Large
Distributed Storage Systems: a Mean-Field Approach",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "51--51",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078531",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed storage systems such as Hadoop File System
or Google File System (GFS) ensure data availability
and durability using replication. Persistence is
achieved by replicating the same data block on several
nodes, and ensuring that a minimum number of copies are
available on the system at any time. Whenever the
contents of a node are lost, for instance due to a hard
disk crash, the system regenerates the data blocks
stored before the failure by transferring them from the
remaining replicas. This paper is focused on the
analysis of the efficiency of replication mechanism
that determines the location of the copies of a given
file at some server. The variability of the loads of
the nodes of the network is investigated for several
policies. Three replication mechanisms are tested
against simulations in the context of a real
implementation of such a system: Random, Least Loaded
and Power of Choice. The simulations show that some of
these policies may lead to quite unbalanced situations:
if $ \beta $ is the average number of copies per node, it
turns out that, at equilibrium, the load of the nodes
may exhibit a high variability. It is shown in this
paper that a simple variant of a power of choice type
algorithm has a striking effect on the loads of the
nodes: at equilibrium, the distribution of the load of
a node has a bounded support, and most nodes have a load
less than $ 2 \beta $, which is an interesting property for
the design of the storage space of these systems.
Stochastic models are introduced and investigated to
explain this interesting phenomenon.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2017:URV,
author = "Kevin K. Chang and Abdullah Giray Yaglik{\c{c}}i and
Saugata Ghose and Aditya Agrawal and Niladrish
Chatterjee and Abhijith Kashyap and Donghyuk Lee and
Mike O'Connor and Hasan Hassan and Onur Mutlu",
title = "Understanding Reduced-Voltage Operation in Modern
{DRAM} Devices: Experimental Characterization,
Analysis, and Mechanisms",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "52--52",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078590",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The energy consumption of DRAM is a critical concern
in modern computing systems. Improvements in
manufacturing process technology have allowed DRAM
vendors to lower the DRAM supply voltage
conservatively, which reduces some of the DRAM energy
consumption. We would like to reduce the DRAM supply
voltage more aggressively, to further reduce energy.
Aggressive supply voltage reduction requires a thorough
understanding of the effect voltage scaling has on DRAM
access latency and DRAM reliability. In this paper, we
take a comprehensive approach to understanding and
exploiting the latency and reliability characteristics
of modern DRAM when the supply voltage is lowered below
the nominal voltage level specified by manufacturers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choi:2017:EDL,
author = "Wonil Choi and Mohammad Arjomand and Myoungsoo Jung
and Mahmut T. Kandemir",
title = "Exploiting Data Longevity for Enhancing the Lifetime
of Flash-based Storage Class Memory",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "53--53",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078527",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes to exploit the capability of
retention time relaxation in flash memories for
improving the lifetime of an SLC-based SSD. The main
idea is that as a majority of I/O data in a typical
workload do not need a retention time larger than a few
days, we can have multiple partial program states in a
cell and use every two states to store one-bit data at
each time. Thus, we can store multiple bits in a cell
(one bit at each time) without erasing it after each
write --- which directly translates into lifetime
enhancement. The proposed scheme is called Dense-SLC
(D-SLC) flash design which improves SSD lifetime by
5.1X--8.6X.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2017:DIL,
author = "Donghyuk Lee and Samira Khan and Lavanya Subramanian
and Saugata Ghose and Rachata Ausavarungnirun and
Gennady Pekhimenko and Vivek Seshadri and Onur Mutlu",
title = "Design-Induced Latency Variation in Modern {DRAM}
Chips: Characterization, Analysis, and Latency
Reduction Mechanisms",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "54--54",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078533",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Variation has been shown to exist across the cells
within a modern DRAM chip. Prior work has studied and
exploited several forms of variation, such as
manufacturing-process- or temperature-induced
variation. We empirically demonstrate a new form of
variation that exists within a real DRAM chip, induced
by the design and placement of different components in
the DRAM chip: different regions in DRAM, based on
their relative distances from the peripheral
structures, require different minimum access latencies
for reliable operation. In particular, we show that in
most real DRAM chips, cells closer to the peripheral
structures can be accessed much faster than cells that
are farther. We call this phenomenon design-induced
variation in DRAM. Our goals are to (i) understand
design-induced variation that exists in real,
state-of-the-art DRAM chips, (ii) exploit it to develop
low-cost mechanisms that can dynamically find and use
the lowest latency at which to operate a DRAM chip
reliably, and, thus, (iii) improve overall system
performance while ensuring reliable system operation.
To this end, we first experimentally demonstrate and
analyze design-induced variation in modern DRAM
devices by testing and characterizing 96 DIMMs (768
DRAM chips). Our experimental study shows that (i)
modern DRAM chips exhibit design-induced latency
variation in both row and column directions, (ii)
access latency gradually increases in the row direction
within a DRAM cell array (mat) and this pattern repeats
in every mat, and (iii) some columns require higher
latency than others due to the internal hierarchical
organization of the DRAM chip. Our characterization
identifies DRAM regions that are vulnerable to errors,
if operated at lower latency, and finds consistency in
their locations across a given DRAM chip generation,
due to design-induced variation. Variations in the
vertical and horizontal dimensions, together, divide
the cell array into heterogeneous-latency regions,
where cells in some regions require longer access
latencies for reliable operation. Reducing the latency
uniformly across all regions in DRAM would improve
performance, but can introduce failures in the
inherently slower regions that require longer access
latencies for correct operation. We refer to these
inherently slower regions of DRAM as design-induced
vulnerable regions. Based on our extensive experimental
analysis, we develop two mechanisms that reliably
reduce DRAM latency. First, DIVA Profiling uses runtime
profiling to dynamically identify the lowest DRAM
latency that does not introduce failures. DIVA
Profiling exploits design-induced variation and
periodically profiles only the vulnerable regions to
determine the lowest DRAM latency at low cost. It is
the first mechanism to dynamically determine the lowest
latency that can be used to operate DRAM reliably. DIVA
Profiling reduces the latency of read/write requests by
35.1\%/57.8\%, respectively, at 55$^\circ$C. Our second
mechanism, DIVA Shuffling, shuffles data such that
values stored in vulnerable regions are mapped to
multiple error-correcting code (ECC) codewords. As a
result, DIVA Shuffling can correct 26\% more multi-bit
errors than conventional ECC. Combined together, our
two mechanisms reduce read/write latency by
40.0\%/60.5\%, which translates to an overall system
performance improvement of 14.7\%/13.7\%/13.8\% (in
2-/4-/8-core systems) over a variety of workloads,
while ensuring reliable operation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gibbens:2017:HND,
author = "Mathias Gibbens and Chris Gniady and Lei Ye and
Beichuan Zhang",
title = "{Hadoop} on Named Data Networking: Experience and
Results",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "55--55",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078508",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In today's data centers, clusters of servers are
arranged to perform various tasks in a massively
distributed manner: handling web requests, processing
scientific data, and running simulations of real-world
problems. These clusters are very complex, and require
a significant amount of planning and administration to
ensure that they perform to their maximum potential.
Planning and configuration can be a long and
complicated process; once completed it is hard to
completely re-architect an existing cluster. In
addition to planning the physical hardware, the
software must also be properly configured to run on a
cluster. Information such as which server is in which
rack and the total network bandwidth between rows of
racks constrain the placement of jobs scheduled to run
on a cluster. Some software may be able to use hints
provided by a user about where to schedule jobs, while
others may simply place them randomly and hope for the
best. Every cluster has at least one bottleneck that
constrains the overall performance to less than the
optimal that may be achieved on paper. One common
bottleneck is the speed of the network: communication
between servers in a rack may be unable to saturate
their network connections, but traffic flowing between
racks or rows in a data center can easily overwhelm the
interconnect switches. Various network topologies have
been proposed to help mitigate this problem by
providing multiple paths between points in the network,
but they all suffer from the same fundamental problem:
it is cost-prohibitive to build a network that can
provide concurrent full network bandwidth between all
servers. Researchers have been working on developing
new network protocols that can make more efficient use
of existing network hardware through a blurring of the
line between network layer and applications. One of the
most well-known examples of this is Named Data
Networking (NDN), a data-centric network architecture
that has been in development for several years. While
NDN has received significant attention for wide-area
Internet, a detailed understanding of NDN benefits and
challenges in the data center environment has been
lacking. The Named Data Networking architecture
retrieves content by names rather than connecting to
specific hosts. It provides benefits such as highly
efficient and resilient content distribution, which fit
well to data-intensive distributed computing. This
paper presents and discusses our experience in
modifying Apache Hadoop, a popular MapReduce framework,
to operate on an NDN network. Through this
first-of-its-kind implementation process, we
demonstrate the feasibility of running an existing,
large, and complex piece of distributed software
commonly seen in data centers over NDN. We show
advantages such as simplified network code and reduced
network traffic, which are beneficial in a data center
environment. There are also challenges faced by NDN
that are being addressed by the community, which can be
magnified under data center traffic. Through detailed
evaluation, we show a reduction of 16\% for overall
data transmission between Hadoop nodes while writing
data with default replication settings. Preliminary
results also show promise for in-network caching of
repeated reads in distributed applications. We show
that while overall performance is currently slower
under NDN, there are challenges and opportunities for
further NDN improvements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2017:UBI,
author = "Cheng Wang and Bhuvan Urgaonkar and Neda Nasiriani and
George Kesidis",
title = "Using Burstable Instances in the Public Cloud: Why,
When and How?",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "56--56",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078591",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To attract more customers, public cloud providers
offer virtual machine (instance) types that trade off
lower prices for poorer capacities. As one salient
approach, the providers employ aggressive statistical
multiplexing of multiple cheaper instances on a single
physical server, resulting in tenants experiencing
higher dynamism in the resource capacity of these
instances. Examples of this are EC2's ``type''
instances and GCE's ``shared-core'' instances. We
collectively refer to these as burstable instances for
their ability to dynamically ``burst'' (increase the
capacity of) their resources. Burstable instances are
significantly cheaper than the ``regular'' instances,
and offer time-varying CPU capacity comprising a
minimum guaranteed base capacity/rate, which is much
smaller than a short-lived peak capacity that becomes
available upon operating at lower than base rate for a
sufficient duration. Table 1 summarizes our
classification of resource capacity dynamism for GCE
and EC2 instances along with the nature of disclosure
made by the provider. To exploit burstable instances
cost-effectively, a tenant would need to carefully
understand the significant additional complexity of
such instances beyond that disclosed by the
providers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Venkatakrishnan:2017:DRB,
author = "Shaileshh Bojja Venkatakrishnan and Giulia Fanti and
Pramod Viswanath",
title = "{Dandelion}: Redesigning the {Bitcoin} Network for
Anonymity",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "57--57",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078528",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cryptocurrencies are digital currencies that provide
cryptographic verification of transactions. In recent
years, they have transitioned from an academic research
topic to a multi-billion dollar industry. Bitcoin is
the best-known example of a cryptocurrency.
Cryptocurrencies exhibit two key properties:
egalitarianism and transparency. In this context,
egalitarianism means that no single party wields
disproportionate power over the network's operation.
This diffusion of power is achieved by asking other
network nodes (e.g., other Bitcoin users) to validate
transactions, instead of the traditional method of
using a centralized authority for this purpose.
Moreover, all transactions and communications are
managed over a fully-distributed, peer-to-peer (P2P)
network. Cryptocurrencies are transparent in the sense
that all transactions are verified and recorded with
cryptographic integrity guarantees; this prevents
fraudulent activity like double-spending of money.
Transparency is achieved through a combination of
clever cryptographic protocols and the publication of
transactions in a ledger known as a blockchain. This
blockchain serves as a public record of every financial
transaction in the network. A property that Bitcoin
does not provide is anonymity. Each user is identified
in the network by a public, cryptographic key. If one
were to link such a key to its owner's human identity,
the owner's financial history could be partially
learned from the public blockchain. In practice, it is
possible to link public keys to identities through a
number of channels, including the networking protocols
on which Bitcoin is built. This is a massive privacy
violation, and can be dangerous for deanonymized
users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jordan:2017:GBO,
author = "Michael Jordan",
title = "On Gradient-Based Optimization: Accelerated,
Distributed, Asynchronous and Stochastic",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "58--58",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078506",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many new theoretical challenges have arisen in the
area of gradient-based optimization for large-scale
statistical data analysis, driven by the needs of
applications and the opportunities provided by new
hardware and software platforms. I discuss several
recent results in this area, including: (1) a new
framework for understanding Nesterov acceleration,
obtained by taking a continuous-time,
Lagrangian/Hamiltonian perspective, (2) a general
theory of asynchronous optimization in multi-processor
systems, (3) a computationally-efficient approach to
stochastic variance reduction, (4) a primal-dual
methodology for gradient-based optimization that
targets communication bottlenecks in distributed
systems, and (5) a discussion of how to avoid
saddle-points in nonconvex optimization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharma:2017:PDR,
author = "Prateek Sharma and David Irwin and Prashant Shenoy",
title = "Portfolio-driven Resource Management for Transient
Cloud Servers",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "59--59",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078511",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cloud providers have begun to offer their surplus
capacity in the form of low-cost transient servers,
which can be revoked unilaterally at any time. While
the low cost of transient servers makes them attractive
for a wide range of applications, such as data
processing and scientific computing, failures due to
server revocation can severely degrade application
performance. Since different transient server types
offer different cost and availability tradeoffs, we
present the notion of server portfolios that is based
on financial portfolio modeling. Server portfolios
enable construction of an ``optimal'' mix of servers to
meet an application's sensitivity to cost and
revocation risk. We implement model-driven portfolios
in a system called ExoSphere, and show how diverse
applications can use portfolios and
application-specific policies to gracefully handle
transient servers. We show that ExoSphere enables
widely-used parallel applications such as Spark, MPI,
and BOINC to be made transiency-aware with modest
effort. Our experiments show that, by allowing the
applications to use suitable transiency-aware policies,
ExoSphere is able to achieve 80\% cost savings when
compared to on-demand servers and greatly reduces
revocation risk compared to existing approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2017:OPP,
author = "Zijun Zhang and Zongpeng Li and Chuan Wu",
title = "Optimal Posted Prices for Online Cloud Resource
Allocation",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "60--60",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078529",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study online resource allocation in a cloud
computing platform, through a posted pricing mechanism:
The cloud provider publishes a unit price for each
resource type, which may vary over time; upon arrival
at the cloud system, a cloud user either takes the
current prices, renting resources to execute its job,
or refuses the prices without running its job there. We
design pricing functions based on the current resource
utilization ratios, in a wide array of demand-supply
relationships and resource occupation durations, and
prove worst-case competitive ratios of the pricing
functions in terms of social welfare. In the basic case
of a single-type, non-recycled resource (i.e.,
allocated resources are not later released for reuse),
we prove that our pricing function design is optimal,
in that any other pricing function can only lead to a
worse competitive ratio. Insights obtained from the
basic cases are then used to generalize the pricing
functions to more realistic cloud systems with multiple
types of resources, where a job occupies allocated
resources for a number of time slots till completion,
at which point the resources are returned to the
cloud resource pool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
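For intuition, a posted-price mechanism of the kind studied above publishes a unit price that increases with the current utilization ratio, and each arriving user simply compares its value against that price. The sketch below illustrates this structure; the exponential price curve and all parameter values are illustrative assumptions, not the pricing function the paper proves optimal.

# Sketch: utilization-based posted pricing for online cloud resource allocation.
# The exponential price curve is an illustrative assumption, not the paper's
# specific pricing function.

def posted_price(utilization, p_min=0.01, p_max=10.0):
    """Unit price as an increasing function of the current utilization in [0, 1]."""
    assert 0.0 <= utilization <= 1.0
    return p_min * (p_max / p_min) ** utilization

def handle_arrival(job_value, demand, used, capacity):
    """A job takes the current price or leaves; accepted jobs raise utilization."""
    if used + demand > capacity:
        return used, False                      # not enough capacity left
    price = posted_price(used / capacity)
    if job_value >= price * demand:             # user accepts the posted price
        return used + demand, True
    return used, False                          # user refuses and leaves

used = 0.0
for value, demand in [(5.0, 10.0), (0.2, 20.0), (8.0, 30.0)]:
    used, accepted = handle_arrival(value, demand, used, capacity=100.0)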
@Article{Wang:2017:OTS,
author = "Xin Wang and Richard T. B. Ma and Yinlong Xu",
title = "On Optimal Two-Sided Pricing of Congested Networks",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "61--61",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078588",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Internet Access Providers (APs) have built massive
network platforms by which end-users and Content
Providers (CPs) can connect and transmit data to each
other. Traditionally, APs adopt one-sided pricing
schemes and obtain revenues mainly from end-users. With
the fast development of data-intensive services, e.g.,
online video streaming and cloud-based applications,
Internet traffic has been growing rapidly. To sustain
the traffic growth and enhance user experiences, APs
have to upgrade network infrastructures and expand
capacities; however, they feel that the revenues from
end-users are insufficient to recoup the corresponding
costs. Consequently, some APs, e.g., Comcast and
AT{\&}T, have recently shifted towards two-sided
pricing schemes, i.e., they start to impose termination
fees on CPs' data traffic in addition to charging
end-users. Although some previous work has studied the
economics of two-sided pricing in network markets,
network congestion and its impacts on the utilities of
different parties were often overlooked. However, the
explosive traffic growth has caused severe congestion
in many regional and global networks, especially during
peak hours, which degrades end-users' experiences and
reduces their data demand. This will strongly affect
the profits of APs and the utilities of end-users and
CPs. For optimizing individual and social utilities,
APs and regulators need to reflect on the design of
pricing strategies and regulatory policies accordingly.
So far, little is known about (1) the optimal two-sided
pricing structure in a congested network and its
changes under varying network environments, e.g.,
capacities of APs and congestion sensitivities of
users, and (2) potential regulations on two-sided
pricing for protecting social welfare from monopolistic
providers. To address these questions, one challenge is
to accurately capture endogenous congestion in
networks. Although the level of congestion is
influenced by network throughput, the users' traffic
demand and throughput are also influenced by network
congestion. It is crucial to capture this endogenous
congestion so as to faithfully characterize the impacts
of two-sided pricing in congested networks. In this
work, we propose a novel model of a two-sided congested
network built by an AP. We model network congestion as
a function of AP's capacity and network throughput,
which is also a function of the congestion level. We
use different forms of the functions to capture
the congestion metric based on different service models,
e.g., M/M/1 queue or capacity sharing, and user traffic
based on different data types, e.g., online video or
text. We characterize users' population and traffic
demand under pricing and congestion parameters and
derive an endogenous system congestion under an
equilibrium. Based on the equilibrium model, we explore
the structures of two-sided pricing which optimize the
AP's profit and social welfare. We analyze the
sensitivities of the optimal pricing under varying
model parameters, e.g., the capacity of the AP and
congestion sensitivity of users. By comparing the two
types of optimal pricing, we derive regulatory
implications from the perspective of social welfare.
In addition, we evaluate the incentives of the AP and
regulators to adopt the two-sided pricing instead of
the traditional one-sided pricing that only charges on
the user side.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
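The endogenous congestion described above can be computed as a fixed point: congestion depends on throughput through the AP's capacity, while demand (and hence throughput) depends on congestion and prices. The sketch below iterates such a fixed point with an M/M/1-style delay metric; the demand and delay functions and their parameters are illustrative assumptions, not the paper's model.

# Sketch: endogenous congestion as a fixed point of demand <-> delay.
# The functional forms below are assumptions used only to show the
# fixed-point structure described in the abstract.

def delay(throughput, capacity):
    """M/M/1-style congestion metric; blows up as throughput nears capacity."""
    return 1.0 / (capacity - throughput) if throughput < capacity else float("inf")

def demand(user_price, congestion, base=80.0, alpha=2.0, beta=5.0):
    """Traffic demand decreasing in the user-side price and in congestion."""
    return max(base - alpha * user_price - beta * congestion, 0.0)

def equilibrium_congestion(user_price, capacity, iters=200):
    """Iterate congestion -> demand -> throughput -> congestion until it settles."""
    congestion = 0.0
    throughput = 0.0
    for _ in range(iters):
        throughput = min(demand(user_price, congestion), 0.999 * capacity)
        congestion = delay(throughput, capacity)
    return congestion, throughput

cong, thr = equilibrium_congestion(user_price=3.0, capacity=100.0)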
@Article{Oh:2017:MFF,
author = "Sewoong Oh",
title = "Matrix Factorization at the Frontier of Non-convex
Optimizations: Abstract for {SIGMETRICS 2017 Rising
Star Award} Talk",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "62--62",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3080573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Principal Component Analysis (PCA) and Canonical
Component Analysis (CCA) are two of the few examples of
non-convex optimization problems that can be solved
efficiently with sharp guarantees. This is achieved by
the classical and well-established understanding of
matrix factorizations. Recently, several new
theoretical and algorithmic challenges have arisen in
statistical learning over matrix factorizations,
motivated by various real-world applications. Despite
the inherent non-convex nature of these problems,
efficient algorithms are being discovered with provable
guarantees, extending the frontier of our understanding
of non-convex optimization problems. I will present
several recent results in this area in applications to
matrix completion and sensing, crowdsourcing, ranking,
and tensor factorization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:2017:OIC,
author = "Hung T. Nguyen and Tri P. Nguyen and Tam N. Vu and
Thang N. Dinh",
title = "Outward Influence and Cascade Size Estimation in
Billion-scale Networks",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "63--63",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078526",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Estimating cascade size and nodes' influence is a
fundamental task in social, technological, and
biological networks. Yet this task is extremely
challenging due to the sheer size and the structural
heterogeneity of networks. We investigate a new
influence measure, termed outward influence (OI),
defined as the (expected) number of nodes that a subset
of nodes $S$ will activate, excluding the nodes in $S$.
Thus, OI equals the de facto standard measure, influence
spread of $S$, minus $ |S|$. OI is not only
more informative for nodes with small influence, but
also critical in designing new effective sampling and
statistical estimation methods. Based on OI, we propose
SIEA\slash SOIEA, novel methods to estimate influence
spread\slash outward influence at scale and with
rigorous theoretical guarantees. The proposed methods
are built on two novel components: (1) IICP, an importance
sampling method for outward influence; and (2) RSA, a
robust mean estimation method that minimizes the number
of samples through analyzing variance and range of
random variables. Compared to the state of the art for
influence estimation, SIEA is $ \Omega (\log^4 n)$
times faster in theory and up to several orders of
magnitude faster in practice. For the first time,
influence of nodes in the networks of billions of edges
can be estimated with high accuracy within a few
minutes. Our comprehensive experiments on real-world
networks also give evidence against the popular
practice of using a fixed number, e.g. 10K or 20K, of
samples to compute the ``ground truth'' for influence
spread.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
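As a point of reference for the quantity being estimated, outward influence is the expected number of newly activated nodes outside the seed set. A naive Monte Carlo estimator under the independent cascade model is sketched below; it only illustrates the stated definition OI(S) = influence spread of S minus |S|, not the paper's SIEA/SOIEA algorithms, and the toy graph is a placeholder.

import random

# Sketch: naive Monte Carlo estimate of outward influence OI(S) under the
# independent cascade model. Illustrates the definition only; not SIEA/SOIEA.

def simulate_cascade(graph, seeds):
    """graph: dict node -> list of (neighbor, activation_probability)."""
    active, frontier = set(seeds), list(seeds)
    while frontier:
        nxt = []
        for u in frontier:
            for v, p in graph.get(u, []):
                if v not in active and random.random() < p:
                    active.add(v)
                    nxt.append(v)
        frontier = nxt
    return len(active)

def outward_influence(graph, seeds, samples=10000):
    spread = sum(simulate_cascade(graph, seeds) for _ in range(samples)) / samples
    return spread - len(seeds)

graph = {0: [(1, 0.5), (2, 0.3)], 1: [(2, 0.4)], 2: []}
print(outward_influence(graph, {0}))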
@Article{Casale:2017:API,
author = "Giuliano Casale",
title = "Accelerating Performance Inference over Closed Systems
by Asymptotic Methods",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "64--64",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078514",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent years have seen a rapid growth of interest in
exploiting monitoring data collected from enterprise
applications for automated management and performance
analysis. In spite of this trend, even simple
performance inference problems involving queueing
theoretic formulas often incur computational
bottlenecks, for example upon computing likelihoods in
models of batch systems. Motivated by this issue, we
revisit the solution of multiclass closed queueing
networks, which are popular models used to describe
batch and distributed applications with parallelism
constraints. We first prove that the normalizing
constant of the equilibrium state probabilities of a
closed model can be reformulated exactly as a
multidimensional integral over the unit simplex. This
gives as a by-product novel explicit expressions for
the multiclass normalizing constant. We then derive a
method based on cubature rules to efficiently evaluate
the proposed integral form in small and medium-sized
models. For large models, we propose novel asymptotic
expansions and Monte Carlo sampling methods to
efficiently and accurately approximate normalizing
constants and likelihoods. We illustrate the resulting
accuracy gains in problems involving optimization-based
inference.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bondorf:2017:QCD,
author = "Steffen Bondorf and Paul Nikolaus and Jens B.
Schmitt",
title = "Quality and Cost of Deterministic Network Calculus:
Design and Evaluation of an Accurate and Fast
Analysis",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "65--65",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078594",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Networks are integral parts of modern safety-critical
systems and certification demands the provision of
guarantees for data transmissions. Deterministic
Network Calculus (DNC) can compute a worst-case bound
on a data flow's end-to-end delay. Accuracy of DNC
results has been improved steadily, resulting in two
DNC branches: the classical algebraic analysis (algDNC)
and the more recent optimization-based analysis
(optDNC). The optimization-based branch provides a
theoretical solution for tight bounds. Its
computational cost grows, however, (possibly
super-)exponentially with the network size.
Consequently, a heuristic optimization formulation
trading accuracy against computational costs was
proposed. In this paper, we challenge
optimization-based DNC with a novel algebraic DNC
algorithm. We show that: (1) no current optimization
formulation scales well with the network size and (2)
algebraic DNC can be considerably improved in both
aspects, accuracy and computational cost. To that end,
we contribute a novel DNC algorithm that transfers the
optimization's search for best attainable delay bounds
to algebraic DNC. It achieves a high degree of accuracy
and our novel efficiency improvements reduce the cost
of the analysis dramatically. In extensive numerical
experiments, we observe that our delay bounds deviate
from the optimization-based ones by only 1.142\% on
average while computation times simultaneously decrease
by several orders of magnitude.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Formby:2017:CSP,
author = "David Formby and Anwar Walid and Raheem Beyah",
title = "A Case Study in Power Substation Network Dynamics",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "66--66",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078525",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The modern world is becoming increasingly dependent on
computing and communication technology to function, but
unfortunately its application and impact on areas such
as critical infrastructure and industrial control
system (ICS) networks remain to be thoroughly studied.
Significant research has been conducted to address the
myriad security concerns in these areas, but these efforts are
virtually all based on artificial testbeds or
simulations designed on assumptions about their
behavior either from knowledge of traditional IT
networking or from basic principles of ICS operation.
In this work, we provide the most detailed
characterization of an example ICS to date in order to
determine if these common assumptions hold true. A live
power distribution substation is observed over the
course of two and a half years to measure its behavior
and evolution over time. Then, a horizontal study is
conducted that compares this behavior with that of three other
substations from the same company. Although most
predictions were found to be correct, some unexpected
behavior was observed that highlights the fundamental
differences between ICS and IT networks including round
trip times dominated by processing speed as opposed to
network delay, several well known TCP features being
largely irrelevant, and surprisingly large jitter from
devices running real-time operating systems. The impact
of these observations is discussed in terms of
generality to other embedded networks, network security
applications, and the suitability of the TCP protocol
for this environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2017:PSM,
author = "You Zhou and Yian Zhou and Min Chen and Shigang Chen",
title = "Persistent Spread Measurement for Big Network Data
Based on Register Intersection",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "67--67",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078593",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Persistent spread measurement is to count the number
of distinct elements that persist in each network flow
for predefined time periods. It has many practical
applications, including detecting long-term stealthy
network activities in the background of normal-user
activities, such as stealthy DDoS attack, stealthy
network scan, or faked network trend, which cannot be
detected by traditional flow cardinality measurement.
With big network data, one challenge is to measure the
persistent spreads of a massive number of flows without
incurring too much memory overhead as such measurement
may be performed at the line speed by network
processors with fast but small on-chip memory. We
propose a highly compact Virtual Intersection
HyperLogLog (VI-HLL) architecture for this purpose. It
achieves far better memory efficiency than the best
prior work of V-Bitmap, and in the meantime drastically
extends the measurement range. Theoretical analysis and
extensive experiments demonstrate that VI-HLL provides
good measurement accuracy even in very tight memory
space of less than 1 bit per flow.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
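For intuition, the persistent spread of a flow is the cardinality of the intersection of its element sets across the measurement periods. The exact, memory-hungry baseline is sketched below; VI-HLL's contribution is to approximate this intersection with compact shared HyperLogLog registers, which the sketch does not attempt to reproduce, and the example periods are placeholders.

# Sketch: exact baseline that VI-HLL approximates. Persistent spread of a
# flow = number of distinct elements present in *every* measurement period.

from functools import reduce

def persistent_spread(per_period_elements):
    """per_period_elements: list of iterables, one per measurement period."""
    sets = [set(elems) for elems in per_period_elements]
    if not sets:
        return 0
    return len(reduce(set.intersection, sets))

periods = [
    {"a", "b", "c", "d"},      # period 1
    {"b", "c", "e"},           # period 2
    {"b", "c", "f", "g"},      # period 3
]
print(persistent_spread(periods))   # -> 2 ("b" and "c" persist)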
@Article{Cao:2017:DEC,
author = "Yi Cao and Javad Nejati and Muhammad Wajahat and Aruna
Balasubramanian and Anshul Gandhi",
title = "Deconstructing the Energy Consumption of the Mobile
Page Load",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "68--68",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3078587",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mobile Web page performance is critical to content
providers, service providers, and users, as Web
browsers are one of the most popular apps on phones.
Slow Web pages are known to adversely affect profits
and lead to user abandonment. While improving mobile
web performance has drawn increasing attention, most
optimizations tend to overlook an important factor,
energy. Given the importance of battery life for mobile
users, we argue that web page optimizations should be
evaluated for their impact on energy consumption.
However, examining the energy effects of a web
optimization is challenging, even if one has access to
power monitors, for several reasons. First, the page
load process is relatively short-lived, ranging from
several milliseconds to a few seconds. Fine-grained
resource monitoring on such short timescales to model
energy consumption is known to incur substantial
overhead. Second, Web pages are complex. A Web
enhancement can have widely varying effects on
different page load activities. Thus, studying the
energy impact of a Web enhancement on page loads
requires understanding its effects on each page load
activity. Existing approaches to analyzing mobile
energy typically focus on profiling and modeling the
resource consumption of the device during execution.
Such approaches consider long-running services and apps
such as games, audio, and video streaming, for which
low-overhead, coarse-grained resource monitoring
suffices. For page loads, however, coarse-grained
resource monitoring is not sufficient to analyze the
energy consumption of individual, short-lived, page
load activities. We present RECON (REsource- and
COmpoNent-based modeling), a modeling approach that
addresses the above challenges to estimate the energy
consumption of any Web page load. The key intuition
behind RECON is to go beyond resource-level information
and exploit application-level semantics to capture the
individual Web page load activities. Instead of
modeling the energy consumption at the full page load
level, which is too coarse grained, RECON models at a
much finer component level granularity. Components are
individual page load activities such as loading
objects, parsing the page, or evaluating JavaScript. To
do this, RECON combines coarse-grained resource
utilization and component-level Web page load
information available from existing tools. During the
initial training stage, RECON uses a power monitor to
measure the energy consumption during a set of page
load processes and juxtaposes this power consumption
with coarse-grained resource and component information.
RECON uses both simple linear regression and more
complex neural networks to build a model of the power
consumption as a function of the resources used and the
individual page load components, thus providing
benefits over individual models. Using the model, RECON
can estimate the energy consumption of any Web page
loaded as-is or upon applying any enhancement, without
the monitor. We experimentally evaluate RECON on the
Samsung Galaxy S4, S5, and Nexus devices using 80 Web
pages. Comparisons with actual power measurements from
a fine-grained power meter show that, using the linear
regression model, RECON can estimate the energy
consumption of the entire page load with a mean error
of 6.3\% and that of individual page load activity
segments with a mean error of 16.4\%. When trained as a
neural network, RECON's mean error for page energy
estimation reduces to 5.4\% and the mean segment error
is 16.5\%. We show that RECON can accurately estimate
the energy consumption of a Web page under different
network conditions, such as lower bandwidth or higher
RTT, even when the model is trained under a default
network condition. RECON also accurately estimates the
energy consumption of a Web page after applying popular
Web enhancements including ad blocking, inlining,
compression, and caching.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
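RECON's modeling step is, at its core, a regression from coarse-grained resource counters plus per-component page-load activity to measured power. A minimal linear-regression sketch of that idea follows, using scikit-learn; the feature names and numbers are placeholders, not RECON's actual feature set or data.

import numpy as np
from sklearn.linear_model import LinearRegression

# Sketch: regress measured power on coarse resource utilization plus
# per-component page-load activity, in the spirit of RECON's linear model.
# Feature names and values are placeholders.

# columns: cpu_util, net_bytes_per_s, ms_loading_objects, ms_parsing, ms_javascript
X = np.array([
    [0.35, 1.2e6,  400, 120, 300],
    [0.60, 2.5e6,  900, 200, 650],
    [0.20, 0.6e6,  250,  80, 150],
    [0.75, 3.1e6, 1100, 260, 900],
])
y = np.array([1.9, 3.2, 1.3, 3.9])   # measured average power in Watts (training phase)

model = LinearRegression().fit(X, y)

# Estimate the energy of a new page load without the power monitor:
features = np.array([[0.5, 1.8e6, 700, 150, 500]])
predicted_power = model.predict(features)[0]              # Watts
page_load_seconds = 2.1
predicted_energy = predicted_power * page_load_seconds    # Joules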
@Article{Ma:2017:RMP,
author = "Richard T. B. Ma and Vishal Misra",
title = "Routing Money, Not Packets: a Tutorial on {Internet}
Economics",
journal = j-SIGMETRICS,
volume = "45",
number = "1",
pages = "69--70",
month = jun,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3143314.3083764",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 18 17:31:18 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial is in the broad area of Internet
Economics, specifically applying ideas from game
theory, both Cooperative and Non-Cooperative. We
consider the origins of the Internet architecture, and
the evolution of the Internet ecosystem from a protocol
and application standpoint. We next look at the
evolution of the pricing structure on the Internet
along three different dimensions: (a) between ISPs, (b)
between ISPs and content providers, and (c) between
ISPs and end users. We present mathematical models
describing the pricing structures in each dimension,
the interaction between the three and competition
amongst the entities leading to the notion of Network
Neutrality. We look at various definitions of Network
Neutrality and analyze the impact of mechanisms
like paid peering, zero rating and differential pricing
on the principle of Network Neutrality.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2017:ORC,
author = "Mark S. Squillante",
title = "On the Optimality of Reflection Control, with
Production-Inventory Applications",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "3--5",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152044",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the control of a Brownian motion (BM) with a
negative drift, so as to minimize a long-run average
cost objective. We show the optimality of a class of
reflection controls that prevent the BM from dropping
below some negative level r, by cancelling out from
time to time part of the negative drift; and this
optimality is established for any holding cost function
h(x) that is increasing in |x|. Furthermore, we show
the optimal reflection level can be derived as the
fixed point that equates the long-run average cost to
the holding cost. We also show the asymptotic
optimality of this reflection control when it is
applied to production-inventory systems driven by
discrete counting processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feinberg:2017:SPA,
author = "Eugene A. Feinberg and Jefferson Huang",
title = "Strongly Polynomial Algorithms for Transient and
Average-Cost {MDPs}",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "6--8",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152045",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers transient total-cost MDPs with
transition rates whose values may be greater than one,
and average-cost MDPs satisfying the condition that the
expected time to hit a certain state from any initial
state and under any stationary policy is bounded above
by a constant. Linear programming formulations for such
MDPs are provided that are solvable in strongly
polynomial time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joshi:2017:BSC,
author = "Gauri Joshi",
title = "Boosting Service Capacity via Adaptive Task
Replication",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "9--11",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152046",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aktas:2017:ESM,
author = "Mehmet Fatih Aktas and Pei Peng and Emina Soljanin",
title = "Effective Straggler Mitigation: Which Clones Should
Attack and When?",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "12--14",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152047",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2017:NCC,
author = "Weixuan Lin and John Z. F. Pang and Eilyan Bitar and
Adam Wierman",
title = "Networked {Cournot} Competition in Platform Markets:
Access Control and Efficiency Loss",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "15--17",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152048",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies network design and efficiency loss
in open and discriminatory access platforms under
networked Cournot competition. In open platforms, every
firm connects to every market, while discriminatory
platforms limit connections between firms and markets
to improve social welfare. We provide tight bounds on
the efficiency loss of both platforms; (i) that the
efficiency loss at a Nash equilibrium under open access
is bounded by 3/2, and (ii) for discriminatory access
platforms, we provide a greedy algorithm for optimizing
network connections that guarantees efficiency loss at
a Nash equilibrium is bounded by 4/3, under an
assumption on the linearity of cost functions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maxey:2017:WAB,
author = "Tyler Maxey and Hakjin Chung and Hyun-Soo Ahn and
Rhonda Righter",
title = "When is Anarchy Beneficial?",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "18--20",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152049",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In many service systems, customers acting to maximize
their individual utility (selfish customers) will
result in a policy that does not maximize their overall
utility; this effect is known as the Price of Anarchy
(PoA). More specifically, the PoA, defined to be the
ratio of selfish utility (the overall average utility
for selfish customers) to collective utility (the
overall average utility if customers act to maximize
their overall average utility) is generally less than
one. Of course, when the environment is fixed, the best
case PoA is one, by definition of the maximization
problem. However, we show that in systems with
feedback, where the environment may change depending on
customer behavior, there can be a Benefit of Anarchy,
i.e., we can have a PoA that is strictly larger than
one. We give an example based on a Stackelberg game
between a service provider and customers in a
single-server queue.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Panigrahy:2017:HRV,
author = "Nitish K. Panigrahy and Jian Li and Don Towsley",
title = "Hit Rate vs. Hit Probability Based Cache Utility
Maximization",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "21--23",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152050",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2017:LCU,
author = "Bo Jiang and Philippe Nain and Don Towsley",
title = "{LRU} Cache under Stationary Requests",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "24--26",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152051",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we extend an approximation first
proposed by Fagin [4] for the LRU cache under the
independence reference model to systems where requests
for different contents form independent stationary and
ergodic processes. We show that this approximation
becomes exact as the number of contents goes to
infinity while maintaining the fraction of the contents
that can populate the cache to be constant. Last, we
provide results on the rate of convergence.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
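Under the independence reference model, Fagin's approximation (closely related to the characteristic-time approximation) computes per-content LRU hit probabilities by finding a window T such that the expected number of distinct contents requested within T equals the cache size. The numerical sketch below assumes independent Poisson request streams purely for illustration; the paper itself extends the approximation to general stationary and ergodic request processes.

import math

# Sketch: characteristic-time (Fagin-style) approximation of LRU hit
# probabilities for independent Poisson request streams (an assumption
# made here only for illustration).

def lru_hit_probabilities(rates, cache_size, tol=1e-9):
    """Find T with sum_i (1 - exp(-rate_i * T)) = cache_size, then h_i = 1 - exp(-rate_i * T)."""
    assert cache_size < len(rates)
    expected = lambda t: sum(1.0 - math.exp(-r * t) for r in rates)
    lo, hi = 0.0, 1.0
    while expected(hi) < cache_size:     # grow the bracket until it covers the root
        hi *= 2.0
    while hi - lo > tol:                 # bisection for the characteristic time T
        mid = (lo + hi) / 2.0
        lo, hi = (mid, hi) if expected(mid) < cache_size else (lo, mid)
    T = (lo + hi) / 2.0
    return [1.0 - math.exp(-r * T) for r in rates]

# Zipf-like popularities over 100 contents, cache holding 20 of them.
rates = [1.0 / (k + 1) for k in range(100)]
hits = lru_hit_probabilities(rates, cache_size=20)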
@Article{Goel:2017:TFS,
author = "Gautam Goel and Niangjun Chen and Adam Wierman",
title = "Thinking Fast and Slow: Optimization Decomposition
Across Timescales",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "27--29",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152052",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many real-world control systems, such as the smart
grid and software defined networks, have decentralized
components that react quickly using local information
and centralized components that react slowly using a
more global view. This work seeks to provide a
theoretical framework for how to design controllers
that are decomposed across timescales in this way. The
framework is analogous to how the network utility
maximization framework uses optimization decomposition
to distribute a global control problem across
independent controllers, each of which solves a local
problem; except our goal is to decompose a global
problem temporally, extracting a timescale separation.
Our results highlight that decomposition of a
multi-timescale controller into a fast timescale,
reactive controller and a slow timescale, predictive
controller can be near-optimal in a strong sense. In
particular, we exhibit such a design, named
Multi-timescale Reflexive Predictive Control (MRPC),
which maintains a per-timestep cost within a constant
factor of the offline optimal in an adversarial
setting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{London:2017:DOL,
author = "Palma London and Niangjun Chen and Shai Vardi and Adam
Wierman",
title = "Distributed Optimization via Local Computation
Algorithms",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "30--32",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152053",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new approach for distributed optimization
based on an emerging area of theoretical computer
science --- local computation algorithms. The approach
is fundamentally different from existing methodologies
and provides a number of benefits, such as robustness
to link failure and adaptivity in dynamic settings.
Specifically, we develop an algorithm, LOCO, that given
a convex optimization problem P with n variables and a
``sparse'' linear constraint matrix with m constraints,
provably finds a solution as good as that of the best
online algorithm for P using only O(log(n+m)) messages
with high probability. The approach is not iterative
and communication is restricted to a localized
neighborhood. In addition to analytic results, we show
numerically that the performance improvements over
classical approaches for distributed optimization are
significant, e.g., it uses orders of magnitude less
communication than ADMM.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aveklouris:2017:EVC,
author = "Angelos Aveklouris and Yorie Nakahira and Maria
Vlasiou and Bert Zwart",
title = "Electric vehicle charging: a queueing approach",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "33--35",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152054",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The number of electric vehicles (EVs) is expected to
increase. As a consequence, more EVs will need
charging, potentially causing not only congestion at
charging stations, but also in the distribution grid.
Our goal is to illustrate how this gives rise to
resource allocation and performance problems that are
of interest to the Sigmetrics community.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scully:2017:OSJ,
author = "Ziv Scully and Guy Blelloch and Mor Harchol-Balter and
Alan Scheller-Wolf",
title = "Optimally Scheduling Jobs with Multiple Tasks",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "36--38",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152055",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider optimal job scheduling where each job
consists of multiple tasks, each of unknown duration,
with precedence constraints between tasks. A job is not
considered complete until all of its tasks are
complete. Traditional heuristics, such as favoring the
job of shortest expected remaining processing time, are
suboptimal in this setting. Furthermore, even if we
know which job to run, it is not obvious which task
within that job to serve. In this paper, we
characterize the optimal policy for a class of such
scheduling problems and show that the policy is simple
to compute.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baryshnikov:2017:LDIa,
author = "Yuliy Baryshnikov and Abram Magner",
title = "Large Deviations for Increasing Subsequences of
Permutations and a Concurrency Application",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "39--41",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152056",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The study of concurrent processes with conflict points
is connected with the geometry of increasing
subsequences of permutations --- a permutation encodes
the transactions of two processes that conflict (i.e.,
must be executed serially), and a given increasing
subsequence encodes one particular serialization of the
executions of two processes. This motivates the study
of random increasing subsequences of random
permutations. Here, we give a large deviation principle
which implies that such a subsequence never deviates
too far from the identity permutation: a random
serialization of two concurrent processes will not
favor either process too much at any given time. We
then give an efficient exact algorithm for uniform
random sampling of an increasing subsequence from a
given permutation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ganguly:2017:LSN,
author = "Arnab Ganguly and Kavita Ramanan and Philippe Robert
and Wen Sun",
title = "A Large-Scale Network with Moving Servers",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "42--44",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Foss:2017:JIQ,
author = "Sergey Foss and Alexander L. Stolyar",
title = "Join-Idle-Queue system with general service times:
Large-scale limit of stationary distributions",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "45--47",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152058",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A parallel server system with $n$ identical servers is
considered. The service time distribution has a finite
mean $ 1 / \mu $, but otherwise is arbitrary. Arriving
customers are to be routed to one of the servers
immediately upon arrival. Join-Idle-Queue routing
algorithm is studied, under which an arriving customer
is sent to an idle server, if such is available, and to
a randomly uniformly chosen server, otherwise. We
consider the asymptotic regime where $ n \to \infty $
and the customer input flow rate is $ \lambda n $.
Under the condition $ \lambda / \mu < 1 / 2 $, we prove
that, as $ n \to \infty $, the sequence of
(appropriately scaled) stationary distributions
concentrates at the natural equilibrium point, with the
fraction of occupied servers being constant and equal to $
\lambda / \mu $. In particular, this implies that the
steady-state probability of an arriving customer having
to wait for service vanishes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2017:NCD,
author = "Yingdong Lu and Mark S. Squillante and Chai Wah Wu",
title = "Nearly Completely Decomposable Epidemic-Like
Stochastic Processes with Time-Varying Behavior",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "48--50",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152059",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Comden:2017:DAD,
author = "Joshua Comden and Zhenhua Liu and Yue Zhao",
title = "Distributed Algorithm Design for Probabilistic Demand
Response",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "52--54",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152061",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hajiesmaili:2017:SRR,
author = "Mohammad H. Hajiesmaili and Minghua Chen and Enrique
Mallada and Chi-Kin Chau",
title = "Summary of Recent Results: Crowd-Sourced
Storage-Assisted Demand Response in Microgrids",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "55--57",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152062",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies the problem of utilizing energy
storage systems to perform demand-response in
microgrids. The objective is to minimize the
operational cost while balancing the supply-and-demand
mismatch. The design space is to select and schedule a
subset of heterogeneous storage devices that arrive
online with different availabilities. Designing a
performance-optimized solution is challenging due to
the existence of mixed packing and covering constraints
in a combinatorial problem, and the essential need for
online design. We devise an online algorithm and show
that it achieves logarithmic bi-criteria competitive
ratio. Experimental results demonstrate the
effectiveness of our algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Le:2017:OEPa,
author = "Tan N. Le and Jie Liang and Zhenhua Liu and Ramesh K.
Sitaraman and Jayakrishnan Nair and Bong Jun Choi",
title = "Optimal Energy Procurement for Geo-distributed Data
Centers in Multi-timescale Electricity Markets",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "58--63",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152063",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Heavy power consumers, such as cloud providers and
data center operators, can significantly benefit from
multi-timescale electricity markets by purchasing some
of the needed electricity ahead of time at cheaper
rates. However, the energy procurement strategy for
data centers in multi-timescale markets becomes a
challenging problem when real world dynamics, such as
the spatial diversity of data centers and the
uncertainty of renewable energy, IT workload, and
electricity price, are taken into account. In this
paper, we develop energy procurement algorithms for
geo-distributed data centers that utilize
multi-timescale markets to minimize the electricity
procurement cost. We propose two algorithms. The first
algorithm provides provably optimal cost minimization
while the other achieves near-optimal cost at a much
lower computational cost. We empirically evaluate our
energy procurement algorithms using real-world traces
of renewable energy, electricity prices, and the
workload demand. Our empirical evaluations show that
our proposed energy procurement algorithms save up to
44\% of the total cost compared to traditional
algorithms that do not use multi-timescale electricity
markets or geographical load balancing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2017:PBP,
author = "Wei Wang and Nanpeng Yu",
title = "Phase Balancing in Power Distribution Network with
Data Center",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "64--69",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152064",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "High degree of unbalance in electric distribution
feeders can significantly affect power quality, damage
electrical equipment, and result in tripping of
protective devices. If not properly managed,
integration of new data centers and distributed energy
resources into the power distribution network will
exacerbate the problem. This paper proposes a new
paradigm which coordinates the operation of data center
and distributed energy resources to reduce phase
unbalance and improve the reliability and efficiency of
electric distribution networks. The coordination scheme
is implemented within the framework of a distribution
system operator managed electricity market. The
proposed phase balancing algorithm with data center is
validated using a modified IEEE distribution test
feeder. The simulation results show the proposed data
center and distributed energy resources coordination
scheme not only significantly reduces the degree of
unbalance of distribution feeders but also results in
sizable reduction in data center electricity costs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Oi:2017:CSE,
author = "Hitoshi Oi",
title = "A Case Study of Energy Efficiency on a Heterogeneous
Multi-Processor",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "70--72",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152065",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this extended abstract, we present a case study of
power efficiency on a heterogeneous multi-core
processor, Exynos 5422 based on the ARM big.LITTLE
architecture. We show the effect of thermal management
on the big (faster) cores and the comparisons between
big and LITTLE (slower) cores using the EEMBC
CoreMark-Pro benchmarks. As expected, the LITTLE cores
are more energy efficient than the big cores at the
maximum performances of both cores for all workloads.
However, the big cores are similarly or more power
efficient than the LITTLE cores for 5 out of 9 workloads when
the performance of both cores is matched by lowering
the clock frequency of big cores. Delay-insertion for
matching the performance is only effective for one
workload, but it may be useful in a multi-programmed
environment when the frequency of each core cannot be
set independently (which is the case for Exynos).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2017:GPE,
author = "Qiang Wang and Xiaowen Chu",
title = "{GPGPU} Power Estimation with Core and Memory
Frequency Scaling",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "73--78",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152066",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the increasing installation of Graphics
Processing Units (GPUs) in supercomputers and data
centers, their huge electricity cost brings new
environmental and economic concerns. Although Dynamic
Voltage and Frequency Scaling (DVFS) techniques have
been successfully applied on traditional CPUs to
reserve energy, the impact of GPU DVFS on application
performance and power consumption is not yet fully
understood, mainly due to the complicated GPU memory
system. This paper proposes a fast prediction model
based on Support Vector Regression (SVR), which can
estimate the average runtime power of a given GPU
kernel using a set of profiling parameters under
different GPU core and memory frequencies. Our
experimental data set includes 931 samples obtained
from 19 GPU kernels running on a real GPU platform with
the core and memory frequencies ranging between 400MHz
and 1000MHz. We evaluate the accuracy of the SVR-based
prediction model by ten-fold cross validation. We
achieve greater accuracy than prior models, with a Mean
Square Error (MSE) of 0.797 Watt and a Mean Absolute
Percentage Error (MAPE) of 3.08\% on average. Combined
with an existing performance prediction model, we can
find the optimal GPU frequency settings that can save
an average of 13.2\% energy across those GPU kernels
with no more than 10\% performance penalty compared to
applying the default setting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
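The modeling pipeline described above is a standard supervised-regression workflow: profile kernels under several core/memory frequency settings, then fit an SVR that maps profiling features plus the two frequencies to average power. A hedged scikit-learn sketch follows; the feature names, hyperparameters, and sample values are placeholders, not the paper's profiling set.

import numpy as np
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score

# Sketch: SVR-based GPU power model in the spirit of the paper's approach.
# Features and values are placeholders.

# columns: core_freq_MHz, mem_freq_MHz, occupancy, dram_util, inst_per_cycle
X = np.array([
    [400,  400, 0.50, 0.30, 0.9],
    [600,  800, 0.75, 0.55, 1.4],
    [800,  600, 0.60, 0.40, 1.2],
    [1000, 1000, 0.85, 0.70, 1.8],
    [500,  700, 0.55, 0.45, 1.0],
    [900,  900, 0.80, 0.65, 1.6],
])
y = np.array([38.0, 72.0, 61.0, 105.0, 52.0, 96.0])   # measured average power (W)

model = make_pipeline(StandardScaler(), SVR(kernel="rbf", C=10.0, epsilon=0.5))
scores = cross_val_score(model, X, y, cv=3, scoring="neg_mean_squared_error")
model.fit(X, y)
predicted_power = model.predict([[700, 800, 0.7, 0.5, 1.3]])[0]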
@Article{Chen:2017:BBS,
author = "Dong Chen and David Irwin",
title = "Black-box Solar Performance Modeling: Comparing
Physical, Machine Learning, and Hybrid Approaches",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "79--84",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152067",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increasing penetration of solar power in the grid
has motivated a strong interest in developing real-time
performance models that estimate solar output based on
a deployment's unique location, physical
characteristics, and weather conditions. Solar models
are useful for a variety of solar energy analytics,
including indirect monitoring, forecasting,
disaggregation, anonymous localization, and fault
detection. Significant recent work focuses on learning
``black box'' models, primarily for forecasting, using
machine learning (ML) techniques, which leverage only
historical energy and weather data for training.
Interestingly, these ML techniques are often ``off the
shelf'' and do not incorporate well-known physical
models of solar generation based on fundamental
properties. Instead, prior work on physical modeling
generally takes a ``white box'' approach that assumes
detailed knowledge of a deployment. In this paper, we
survey existing work on solar modeling, and then
compare black-box solar modeling using ML versus
physical approaches. We then (i) present a configurable
hybrid approach that combines the benefits of both by
enabling users to select the parameters they physically
model versus learn via ML, and (ii) show that it
significantly improves model accuracy across 6
deployments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
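To make the physical-versus-ML distinction concrete, a common "white box" PV formula scales the panel rating by irradiance and derates it with cell temperature, while a "black box" model learns the mapping from weather to output directly from history. The sketch below shows both in simplified form; the derate coefficient and data are illustrative assumptions and do not reproduce the paper's configurable hybrid model.

import numpy as np
from sklearn.linear_model import LinearRegression

# Sketch: simplified physical vs. black-box solar models. Coefficients and
# data are illustrative assumptions only.

def physical_output(rating_kw, irradiance_w_m2, cell_temp_c, temp_coeff=0.004):
    """Physical-style PV derating: scale by irradiance, derate above 25 C."""
    return rating_kw * (irradiance_w_m2 / 1000.0) * (1.0 - temp_coeff * (cell_temp_c - 25.0))

# Black-box alternative: learn output directly from weather history.
weather = np.array([[900, 30], [600, 22], [300, 15], [1000, 35]])  # irradiance, temp
output = np.array([4.1, 2.9, 1.5, 4.4])                            # measured kW
ml_model = LinearRegression().fit(weather, output)

print(physical_output(5.0, 800, 28))
print(ml_model.predict([[800, 28]])[0])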
@Article{You:2017:BSA,
author = "Pengcheng You and Youxian Sun and John Pang and Steven
Low and Minghua Chen",
title = "Battery Swapping Assignment for Electric Vehicles: a
Bipartite Matching Approach",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "85--87",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152068",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper formulates a multi-period optimal station
assignment problem for electric vehicle (EV) battery
swapping that takes into account both temporal and
spatial couplings. The goal is to reduce the total EV
cost and station congestion due to temporary shortage
in supply of available batteries. We show that the
problem is reducible to the minimum weight perfect
bipartite matching problem. This leads to an efficient
solution based on the Hungarian algorithm. Numerical
results suggest that the proposed solution provides a
significant improvement over a greedy heuristic that
assigns EVs to nearest stations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
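Note: the entry above reduces station assignment to minimum-weight perfect bipartite matching solved with the Hungarian algorithm. The short Python sketch below illustrates that generic reduction on an invented 3-EV / 3-station cost matrix; the costs and their meaning are assumptions for illustration, not data or code from the paper.

# Minimal sketch (illustrative only): assign EVs to battery-swapping stations by
# minimum total cost using the Hungarian algorithm. The cost matrix is hypothetical;
# the paper's costs combine travel and station-congestion terms over multiple periods.
import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([            # rows = EVs, columns = stations
    [4.0, 1.0, 3.0],
    [2.0, 0.5, 5.0],
    [3.0, 2.0, 2.5],
])

ev_idx, station_idx = linear_sum_assignment(cost)   # minimum-weight perfect matching
for i, j in zip(ev_idx, station_idx):
    print(f"EV {i} -> station {j} (cost {cost[i, j]})")
print("total assignment cost:", cost[ev_idx, station_idx].sum())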
@Article{Doan:2017:DLM,
author = "Thinh T. Doan and Subhonmesh Bose and Carolyn L.
Beck",
title = "Distributed {Lagrangian} Method for Tie-Line
Scheduling in Power Grids under Uncertainty",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "88--90",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152069",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "System operators (SOs) manage the grid and its assets
in different parts (areas) of an interconnected power
network. One would ideally seek to co-optimize the grid
assets across multiple areas by solving a centralized
optimization problem. Gathering the dispatch cost
structures and the network constraints from all areas
for a centralized solution remains difficult due to
technical, historical, and sometimes legal barriers.
Motivated by the need for a distributed solution
architecture for multi-area power systems, we propose a
distributed Lagrangian algorithm in this paper. We
establish convergence rates for our algorithm that
solves the deterministic tie-line scheduling problem as
well as its robust variant (with policy space
approximations). Our algorithm does not need any form
of central coordination. We illustrate its efficacy on
IEEE test systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Islam:2017:FLP,
author = "Mohammad A. Islam and Shaolei Ren and Adam Wierman",
title = "A First Look at Power Attacks in Multi-Tenant Data
Centers",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "91--93",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152070",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Oversubscription increases the utilization of
expensive power infrastructure in multi-tenant data
centers, but it can create dangerous emergencies and
outages if the designed power capacity is exceeded.
Despite the safeguards in place today to prevent power
outages, this extended abstract demonstrates that
multi-tenant data centers are vulnerable to well-timed
power attacks launched by a malicious tenant (i.e.,
attacker). Further, we show that there is a physical
side channel --- a thermal side channel due to hot air
recirculation --- that contains information about the
benign tenants' runtime power usage. We develop a
state-augmented Kalman filter that guides an attacker
to precisely time its power attacks at moments that
coincide with the benign tenants' high power demand,
thus overloading the designed power capacity. Our
experimental results show that an attacker can capture
53\% of all attack opportunities, significantly
compromising the data center availability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pang:2017:LSF,
author = "John Z. F. Pang and Linqi Guo and Steven H. Low",
title = "Load-side Frequency Regulation with Limited Control
Coverage",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "94--96",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152071",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Increasing renewable energy increases uncertainty in
energy systems. As a consequence, generator-side
control for frequency regulation, impacted by the slow
reaction of generators to meet urgent needs, may no
longer suffice. With increasing integration of smart
appliances which are able to sense, communicate and
control, load-side control can help alleviate the
aforementioned problem as it reacts fast and helps to
localize disturbances. However, almost all existing
methods for optimal load-side control require full
information control coverage in the system. Framing the
problem as an optimization problem and applying
saddle-point dynamics, we obtain a control law that
rebalances power and asymptotically stabilizes
frequency after a disturbance. We generalize previous
work to design a controller which only requires partial
control coverage over all nodes, yet still achieves
secondary frequency control. We verify these results
via simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kelic:2017:ICI,
author = "Andjelka Kelic",
title = "Interdependencies in Critical Infrastructure
Modeling",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "99--102",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152073",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Critical infrastructures are highly interconnected
both within an infrastructure sector and with one
another. In many cases, there are also cyber systems
that provide information or control to those
infrastructures. Those dependencies can lead to
unexpected consequences in the event of an incident.
Simulation models that account for dependencies are
critical to gain insight. This document provides an
overview of accounting for dependencies in constructing
simulation models and some of the associated
challenges. The 9-1-1 system provides an example of a
highly connected critical infrastructure system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Guo:2017:MPS,
author = "Linqi Guo and Chen Liang and Steven H. Low",
title = "Monotonicity Properties and Spectral Characterization
of Power Redistribution in Cascading Failures",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "103--106",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152074",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work, we apply spectral graph theory methods
to study the monotonicity and structural properties of
power redistribution in a cascading failure process. We
demonstrate that, in contrast to the lack of
monotonicity in the physical domain, there is a rich
collection of monotonicity properties one can explore in the
spectral domain, leading to a systematic way to define
topological metrics that are monotonic. It is further
shown that many useful quantities in cascading failure
analysis can be unified into a spectral inner product,
which itself is related to graphical properties of the
transmission network. Such graphical interpretations
precisely capture Kirchhoff's law expressed in
terms of graph structural properties and gauge the
impact of a line when it is tripped.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Oostenbrink:2017:CID,
author = "Jorik Oostenbrink and Fernando Kuipers",
title = "Computing the Impact of Disasters on Networks",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "107--110",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152075",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the vulnerability of a
network to disasters, in particular earthquakes, and we
propose an efficient method to compute the distribution
of a network performance measure, based on a finite set
of disaster areas and occurrence probabilities. Our
approach has been implemented as a tool to help
visualize the vulnerability of a network to disasters.
With that tool, we demonstrate our methods on an
official set of Japanese earthquake scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
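Note: the method summarized above evaluates a performance measure over a finite set of disaster scenarios with occurrence probabilities. The sketch below shows that computation in miniature on an invented grid network with invented scenarios; the graph, failure sets, probabilities, and the largest-component measure are assumptions, not the paper's data or tool.

# Minimal sketch (illustrative only): distribution of a network performance measure
# over a finite set of disaster scenarios with occurrence probabilities.
import networkx as nx

G = nx.grid_2d_graph(4, 4)            # toy 4x4 grid network
scenarios = [                         # (nodes destroyed, occurrence probability)
    ({(0, 0), (0, 1)}, 0.5),
    ({(1, 1), (2, 1), (2, 2)}, 0.3),
    ({(3, 3)}, 0.2),
]

distribution = {}                     # performance value -> total probability
for failed, prob in scenarios:
    H = G.copy()
    H.remove_nodes_from(failed)
    # measure: fraction of the original nodes in the largest surviving component
    largest = max(nx.connected_components(H), key=len)
    perf = round(len(largest) / G.number_of_nodes(), 3)
    distribution[perf] = distribution.get(perf, 0.0) + prob

print(distribution)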
@Article{Soltan:2017:APG,
author = "Saleh Soltan and Gil Zussman",
title = "Algorithms for Power Grid State Estimation after
Cyber-Physical Attacks",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "111--114",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152076",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present methods for estimating the state of the
power grid following a cyber-physical attack. We assume
that an adversary attacks an area by: (i) disconnecting
some lines within that area (failed lines), and (ii)
obstructing the information from within the area to
reach the control center. Given the phase angles of the
nodes outside the attacked area under either the DC or
AC power flow models (before and after the attack), the
provided methods can estimate the phase angles of the
nodes and detect the failed lines inside the attacked
area. The novelty of our approach is the transformation
of the line failures detection problem, which is
combinatorial in nature, to a convex optimization
problem. As a result, our methods can detect any number
of line failures in a running time that is independent
of the number of failures and is solely dependent on
the size of the attacked area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bienstock:2017:CUA,
author = "Daniel Bienstock and Mauro Escobar",
title = "Computing undetectable attacks on power grids",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "115--118",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152077",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider combined data and physical attacks on
power grids, motivated by recent events and research.
We consider a setting where an attacker may alter the
topology of a power grid by removing lines and may also
alter the load (demand) of some nodes; simultaneously
the attacker interdicts data flowing to the control
center. We use the PMU model of data that provides
high-fidelity AC power flow data (voltages and
currents). The goal of the attacker is to provide data
that paints a completely safe picture for the grid
which is consistent with the net load change, while at
the same time disguising large line overloads, a
fundamentally dangerous situation that may lead to a
cascading failure. We provide a computational procedure
that efficiently computes sparse attacks even on cases
of large grids.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stergiopoulos:2017:IAJ,
author = "George Stergiopoulos and Evangelos Valvis and Foivos
Anagnou-Misyris and Nick Bozovic and Dimitris
Gritzalis",
title = "Interdependency analysis of junctions for congestion
mitigation in Transportation Infrastructures",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "119--124",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152078",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The resilience of the Transportation road
infrastructure network is of major importance, since
failures such as prolonged road congestion in specific
parts of the infrastructure often initiate major
cascading effects that block transportation and/or
disrupt services of other infrastructures over wide
areas. Existing traffic flow analysis methods lack the
ability to understand the cascading effects of congestion
and how to improve overall resilience over wider areas.
Dependency risk graphs have been proposed as a tool for
analyzing such cascading failures using infrastructure
dependency chains. In this paper, we propose a
risk-based interdependency analysis methodology capable
of detecting large-scale traffic congestion between
interconnected junctions of the road network and
provide mitigation solutions to increase traffic flow
resilience. Dependency risk chains of junctions provide
important information about which junctions are
affected when other major junctions are congested in
the road transportation network. Targeted mitigation
mechanisms for traffic congestion can be proposed and
the causes of bottlenecks can be analyzed to introduce
road construction or repairs with the best
possible results in relieving traffic. We applied the
proposed methodology on data collected by the UK
government using cyber-physical traffic sensors over
the course of 6 years. Our tool analyzed the UK major/A
road transportation network, detected n-order junction
dependencies and automatically proposed specific
mitigation solutions to increase the overall resilience
of the road infrastructure network. Simulation results
indicate that detected mitigation options, if applied,
can increase overall congestion resilience in wider
areas of the network by up to 12\% by lowering the
likelihood of congestion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2017:DGA,
author = "Juntao Chen and Corinne Touati and Quanyan Zhu",
title = "A Dynamic Game Analysis and Design of Infrastructure
Network Protection and Recovery",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "125--128",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152079",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Infrastructure networks are vulnerable to both cyber
and physical attacks. Building a secure and resilient
networked system is essential for providing reliable
and dependable services. To this end, we establish a
two-player three-stage game framework to capture the
dynamics in the infrastructure protection and recovery
phases. Specifically, the goal of the infrastructure
network designer is to keep the network connected
before and after the attack, while the adversary aims
to disconnect the network by compromising a set of
links. With costs for creating and removing links, the
two players aim to maximize their utilities while
minimizing the costs. In this paper, we use the concept
of subgame perfect equilibrium (SPE) to characterize
the optimal strategies of the network defender and
attacker. We derive the SPE explicitly in terms of
system parameters. Finally, we use a case study of
UAV-enabled communication networks for disaster
recovery to corroborate the obtained analytical
results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ding:2017:CBT,
author = "Jianguo Ding and Yacine Atif and Sten F. Andler and
Birgitta Lindstr{\"o}m and Manfred Jeusfeld",
title = "{CPS}-based Threat Modeling for Critical
Infrastructure Protection",
journal = j-SIGMETRICS,
volume = "45",
number = "2",
pages = "129--132",
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3152042.3152080",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Oct 12 14:25:43 MDT 2017",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cyber-Physical Systems (CPSs) are augmenting
traditional Critical Infrastructures (CIs) with
data-rich operations. This integration creates complex
interdependencies that expose CIs and their components
to new threats. A systematic approach to threat
modeling is necessary to assess CIs' vulnerability to
cyber, physical, or social attacks. We suggest a new
threat modeling approach to systematically synthesize
knowledge about the safety management of complex CIs
and situational awareness that helps in understanding the
nature of a threat and its potential cascading-effects
implications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2017:WIC,
author = "Xunyu Zhou",
title = "Who Are {I}: Intrapersonal Conflicts in Performance
Measure and Control",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "1--1",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199526",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yekkehkhany:2017:GPT,
author = "Ali Yekkehkhany and Avesta Hojjati and Mohammad H.
Hajiesmaili",
title = "{GB-PANDAS}: Throughput and heavy-traffic optimality
analysis for affinity scheduling",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "2--14",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199528",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic affinity scheduling has been an open problem
for nearly three decades. The problem is to dynamically
schedule multi-type tasks to multi-skilled servers such
that the resulting queueing system is both stable in
the capacity region (throughput optimality) and the
mean delay of tasks is minimized at high loads near the
boundary of the capacity region (heavy-traffic
optimality). As for applications, data-intensive
analytics like MapReduce, Hadoop, and Dryad fit into
this setting, where the set of servers is heterogeneous
for different task types, so the pair of task type and
server determines the processing rate of the task. The
load balancing algorithm used in such frameworks is an
example of affinity scheduling which is desired to be
both robust and delay optimal at high loads when
hot-spots occur. Fluid model planning, the MaxWeight
algorithm, and the generalized $ c \mu $-rule are among the
first algorithms proposed for affinity scheduling that
have theoretical guarantees on being optimal in
different senses, which will be discussed in the
related work section. None of these algorithms is
practical for use in data center applications because
of their unrealistic assumptions. The
join-the-shortest-queue-MaxWeight (JSQ-MaxWeight),
JSQ-Priority, and weighted-workload algorithms are
examples of load balancing policies for systems with
two and three levels of data locality with a rack
structure. In this work, we propose the
Generalized-Balanced-Pandas algorithm (GB-PANDAS) for a
system with multiple levels of data locality and prove
its throughput optimality. We prove this result under
an arbitrary distribution for service times, whereas
most previous theoretical work assumes geometric
distribution for service times. The extensive
simulation results show that the GB-PANDAS algorithm
alleviates the mean delay and has a better performance
than the JSQ-MaxWeight algorithm by up to twofold at
high loads. We believe that the GB-PANDAS algorithm is
heavy-traffic optimal in a larger region than
JSQ-MaxWeight, which is an interesting problem for
future work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goldsztajn:2017:CNA,
author = "Diego Goldsztajn and Andres Ferragut and Fernando
Paganini and Matthieu Jonckheere",
title = "Controlling the number of active instances in a cloud
environment",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "15--20",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199529",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a cloud environment in which computing
instances may either be reserved in advance, or
dynamically spawned to serve a fluctuating or unknown
load. We first consider a centralized scheme where a
system operator maintains the job queue and controls
the spawning of additional capacity; through queueing
models and their fluid and diffusion counterparts we
explore the tradeoff between queueing delay and the
service capacity variability. Secondly, we consider the
setting of a dispatcher who must immediately send jobs,
with no delay, to decentralized instances, and in
addition may summon extra capacity. Here the capacity
scaling problem couples with one of load balancing. We
show how the popular join-the-idle-queue policy can be
combined with an adequate rule for spawning instances,
yielding an equilibrium with no queuing delay and
controlling service capacity variability; we
accommodate as well the case where spawned instances
incur startup delay. Finally, we analyze the question
of deciding, for a given pricing structure for the
cloud service, how many fixed instances should be
reserved in advance. The behavior of these policies is
illustrated by simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joshi:2017:SRB,
author = "Gauri Joshi",
title = "Synergy via Redundancy: Boosting Service Capacity with
Adaptive Replication",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "21--28",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199530",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The maximum possible throughput (rate of task
completion) of a multi-server system is typically the
sum of the service rates of individual servers. Recent
works show that task replication can boost the
throughput, in particular if the service time has high
variability ($ C_v > 1 $). Thus, redundancy can be used
to create synergy among servers such that their overall
throughput is greater than the sum of the individual
servers' throughputs.
This paper seeks to find the fundamental limit of this
capacity boost achieved by task replication. The
optimal adaptive replication policy can be found using
a Markov Decision Process (MDP) framework, but the MDP
is hard to solve in general. We propose two replication
policies, MaxRate and AdaRep that gradually add
replicas only when needed. To quantify the optimality
gap of these policies, we also derive an upper bound
on the service capacity for the two-server case.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{He:2017:DLA,
author = "Ting He",
title = "Distributed Link Anomaly Detection via Partial Network
Tomography",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "29--42",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199532",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of detecting link loss
anomalies from end-to-end measurements using network
tomography. Network tomography provides an alternative
to traditional means of network monitoring by inferring
link-level performance characteristics from end-to-end
measurements. Existing network tomography solutions,
however, insist on characterizing the performance of
all the links, which introduces unnecessary delays for
anomaly detection due to the need of collecting all the
measurements at a central location. We address this
problem by developing a distributed detection scheme
that integrates detection into the measurement fusion
process by testing anomalies at the level of minimal
identifiable link sequences (MILSs). We develop
efficient methods to configure the proposed detection
scheme such that its false alarm probability satisfies
a given bound. Meanwhile, we provide analytical bounds
on the detection probability and the detection delay.
We then extend our solution to further improve the
detection performance by designing the probing and
fusion process. Our evaluations on real topologies
verify that the proposed scheme significantly
outperforms both centralized detection based on link
parameters inferred by traditional network tomography
and distributed detection based on raw end-to-end
measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tootaghaj:2017:PTO,
author = "Diman Zad Tootaghaj and Ting He and Thomas {La
Porta}",
title = "Parsimonious Tomography: Optimizing
Cost-Identifiability Trade-off for Probing-based
Network Monitoring",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "43--55",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199533",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network tomography using end-to-end probes provides a
powerful tool for monitoring the performance of
internal network elements. However, active probing can
generate tremendous traffic, which degrades the overall
network performance. Meanwhile, not all the probing
paths contain useful information for identifying the
link metrics of interest. This observation motivates us
to study the optimal selection of monitoring paths to
balance identifiability and probing cost. Assuming
additive link metrics (e.g., delays), we consider four
closely-related optimization problems: (1) Max-IL-Cost
that maximizes the number of identifiable links under a
probing budget, (2) Max-Rank-Cost that maximizes the
rank of selected paths under a probing budget, (3)
Min-Cost-IL that minimizes the probing cost while
preserving identifiability, and (4) Min-Cost-Rank that
minimizes the probing cost while preserving rank. While
(1) and (3) are hard to solve, (2) and (4) are easy to
solve, and the solutions give a good approximation for
(1) and (3). Specifically, we provide an optimal
algorithm for (4) and a $ (1 - 1/e) $-approximation algorithm
for (2). We prove that the solution for (4) provides
tight upper/lower bounds on the minimum cost of (3),
and the solution for (2) provides upper/lower bounds on
the maximum identifiability of (1). Our evaluations on
real topologies show that solutions to the rank-based
optimization (2, 4) have superior performance in terms
of the objectives of the identifiability-based
optimization (1, 3), and our solutions can reduce the
total probing cost by an order of magnitude while
achieving the same monitoring performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
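Note: for the rank-based variants mentioned above, greedy path selection is the natural baseline. The sketch below greedily adds the candidate path with the best rank gain per unit probing cost until a budget is exhausted; the routing matrix, costs, and budget are invented, and this generic greedy is shown only to illustrate the rank objective, not as the paper's algorithm.

# Minimal sketch (illustrative only): greedy selection of probing paths to maximize
# the rank of the selected routing submatrix under a probing-cost budget.
import numpy as np

paths = np.array([          # rows: candidate probing paths, columns: links (1 = traversed)
    [1, 1, 0, 0],
    [0, 1, 1, 0],
    [1, 0, 1, 0],
    [0, 0, 1, 1],
])
cost = np.array([1.0, 1.0, 2.0, 1.5])   # hypothetical probing cost per path
budget = 3.0

selected, spent = [], 0.0
while True:
    best, best_ratio = None, 0.0
    current_rank = np.linalg.matrix_rank(paths[selected]) if selected else 0
    for i in range(len(paths)):
        if i in selected or spent + cost[i] > budget:
            continue
        gain = np.linalg.matrix_rank(paths[selected + [i]]) - current_rank
        ratio = gain / cost[i]
        if ratio > best_ratio:
            best, best_ratio = i, ratio
    if best is None:                     # no affordable path adds rank
        break
    selected.append(best)
    spent += cost[best]

print("selected paths:", selected,
      "rank:", int(np.linalg.matrix_rank(paths[selected])),
      "cost:", spent)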
@Article{Jansen:2017:PEW,
author = "Bart Jansen and Timothy Goodwin and Varun Gupta and
Fernando Kuipers and Gil Zussman",
title = "Performance Evaluation of {WebRTC}-based Video
Conferencing",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "56--68",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "WebRTC has quickly become popular as a video
conferencing platform, partly due to the fact that many
browsers support it. WebRTC utilizes the Google
Congestion Control (GCC) algorithm to provide
congestion control for realtime communications over
UDP. The performance during a WebRTC call may be
influenced by several factors, including the underlying
WebRTC implementation, the device and network
characteristics, and the network topology. In this
paper, we perform a thorough performance evaluation of
WebRTC both in emulated synthetic network conditions as
well as in real wired and wireless networks. Our
evaluation shows that WebRTC streams have a slightly
higher priority than TCP flows when competing with
cross traffic. In general, while in several of the
considered scenarios WebRTC performed as expected, we
observed important cases where there is room for
improvement. These include the wireless domain and the
newly added support for the video codecs VP9 and H.264
that does not perform as expected.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moka:2017:APS,
author = "S. B. Moka and S. Juneja and M. R. H. Mandjes",
title = "Analysis of Perfect Sampling Methods for Hard-sphere
Models",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "69--75",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of generating perfect samples
from a Gibbs point process, a spatial process that is
absolutely continuous w.r.t. a Poisson point process.
Examples include area-interaction processes,
hard-sphere models and Strauss processes.
Traditionally, this is addressed using coupling from
the past (CFTP) based methods. We consider
acceptance-rejection methods that, unlike the common
CFTP methods, do not have the impatient-user bias. Our
key contribution is a novel importance sampling based
acceptance-rejection methodology for generating
perfect samples from Gibbs point processes. We focus on
a simpler setting of hard-sphere models in a
d-dimensional hypercube that we analyze in an
asymptotic regime where the number of spheres generated
increases to infinity while the sphere radius decreases
to zero at varying rates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
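Note: the acceptance-rejection idea above can be illustrated, in its naive form, by proposing Poisson configurations on the unit square and accepting only non-overlapping ones. The intensity and radius below are arbitrary, and the paper's importance-sampling refinement is deliberately not reproduced.

# Minimal sketch (illustrative only): naive acceptance-rejection sampling of a 2-D
# hard-sphere configuration. Propose points from a Poisson process on the unit square
# and accept the whole configuration only if all pairwise distances exceed 2*radius.
import numpy as np

rng = np.random.default_rng(0)
intensity, radius = 20.0, 0.03           # arbitrary illustrative parameters

def propose():
    n = rng.poisson(intensity)
    return rng.random((n, 2))

def is_hard_sphere(points, r):
    for i in range(len(points)):
        for j in range(i + 1, len(points)):
            if np.linalg.norm(points[i] - points[j]) < 2 * r:
                return False
    return True

attempts = 0
while True:
    attempts += 1
    pts = propose()
    if is_hard_sphere(pts, radius):
        break
print(f"accepted a configuration of {len(pts)} spheres after {attempts} proposals")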
@Article{Hollocou:2017:MLC,
author = "Alexandre Hollocou and Thomas Bonald and Marc
Lelarge",
title = "Multiple Local Community Detection",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "76--83",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Community detection is a classical problem in the
field of graph mining. We are interested in local
community detection where the objective is to recover
the communities containing some given set of nodes,
called the seed set. While existing approaches
typically recover only one community around the seed
set, most nodes belong to multiple communities in
practice. In this paper, we introduce a new algorithm
for detecting multiple local communities, possibly
overlapping, by expanding the initial seed set. The new
nodes are selected by some local clustering of the
graph embedded in a vector space of low dimension. We
validate our approach on real graphs, and show that it
provides more information than existing algorithms to
recover the complex graph structure that appears
locally.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
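Note: as a point of comparison for the seed-expansion idea above, the sketch below runs a standard local-community baseline (personalized PageRank from the seed set, then keeping the top-scoring nodes). It is not the paper's algorithm, and the graph, seed set, and community size are arbitrary choices.

# Minimal sketch (illustrative only): seed-set expansion via personalized PageRank,
# a common baseline for local community detection.
import networkx as nx

G = nx.karate_club_graph()
seeds = {0, 1}                                    # hypothetical seed set
personalization = {v: (1.0 if v in seeds else 0.0) for v in G}
scores = nx.pagerank(G, alpha=0.85, personalization=personalization)

# keep the highest-scoring nodes as one local community around the seeds
community = sorted(scores, key=scores.get, reverse=True)[:8]
print("recovered local community:", sorted(community))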
@Article{Baryshnikov:2017:LDIb,
author = "Yuli Baryshnikov and Abram Magner",
title = "Large Deviations for Increasing Subsequences of
Permutations and a Concurrency Application",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "84--89",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The study of concurrent processes with conflicts
affecting concurrent execution has been long related to
various geometric objects. In the special case of two
processes and non-overlapping conflicts (definitions
below) an instance of a problem is encoded by a
permutation describing the conflict sets for the
interacting processes. Further, it turns out that the
set of increasing subsequences of the permutation
describes the homotopy classes of the execution plans
for the concurrent processes, an abstraction encoding
one particular serialization of the executions of two
processes. This motivates the study of random
increasing subsequences of random permutations. Here,
we give a large deviation principle which implies that
such a subsequence never deviates too far from the
identity permutation: a random serialization of two
concurrent processes will not delay either process's
access to shared resources too much at any given time.
We then give an efficient exact algorithm for uniform
random sampling of an increasing subsequence from a
given permutation. Finally, we indicate how our results
generalize to larger numbers of processes, wherein
conflict sets may take on more interesting
geometries.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhatt:2017:IIF,
author = "Sujay Bhatt and Vikram Krishnamurthy",
title = "Incentivized Information Fusion with Social Sensors",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "90--95",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper deals with the problem of incentivized
information fusion, where a controller seeks to infer
an unknown parameter by incentivizing a network of
social sensors to reveal the information. The social
sensors gather information on the parameter after
interacting with other social sensors, to optimize a
local utility function. We are interested in finding
incentive rules that are easy to compute and implement.
In particular, we give sufficient conditions on the
model parameters under which the optimal rule for the
controller is provably a threshold decision rule, i.e.,
don't incentivize when the estimate (of the parameter)
is below a certain level and incentivize otherwise. We
will further provide a complete sample path
characterization of the optimal incentive rule, i.e.,
the nature (average trend) of the optimal incentive
sequence resulting from the controller employing the
optimal threshold rule. We show that the optimal
incentive sequence is a sub-martingale, i.e., the
optimal incentives increase on average over time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abbe:2017:LGD,
author = "Emmanuel Abbe",
title = "Learning from graphical data",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "96--96",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sun:2017:SPW,
author = "Fengyou Sun and Yuming Jiang",
title = "A Statistical Property of Wireless Channel Capacity:
Theory and Application",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "97--108",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a set of new results on wireless
channel capacity by exploring its special
characteristics. An appealing discovery is that the
instantaneous and cumulative capacity distributions of
typical fading channels are light tailed. An
implication of this property is that these
distributions and subsequently the distributions of
delay and backlog for constant arrivals can be
upper-bounded by some exponential functions, which is
often assumed but not justified in the literature of
wireless network performance analysis. In addition,
three representative dependence structures of the
capacity process are studied, namely comonotonicity,
independence, and Markovian, and bounds are derived for
the cumulative capacity distribution and
delay-constrained capacity. To help gain insights in
the performance of a wireless channel whose capacity
process may be too complex or for which detailed dependence
information is lacking, stochastic orders are
introduced to the capacity process, based on which,
comparison results of delay and delay-constrained
capacity are obtained. Moreover, the impact of
self-interference in communication, which is an open
problem in stochastic network calculus (SNC), is
investigated and original results are derived. These
results complement the SNC literature, easing its
application to wireless networks and its extension
towards a calculus for wireless networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cecchi:2017:MFL,
author = "F. Cecchi and P. M. {Van de Ven} and S. Shneer",
title = "Mean-field limits for multi-hop random-access
networks",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "109--122",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent years have seen wireless networks increasing in
scale, interconnecting a vast number of devices over
large areas. Due to their size these networks rely on
distributed algorithms for control, allowing each node
to regulate its own activity. A popular such algorithm
is Carrier-Sense Multi-Access (CSMA), which is at the
core of the well-known 802.11 protocol. Performance
analysis of CSMA-based networks has received
significant attention in the research literature in
recent years, but focused almost exclusively on
saturated networks where nodes always have packets
available. However, one of the key features of emerging
large-scale networks is their ability to transmit
packets across large distances via multiple
intermediate nodes (multi-hop). This gives rise to
vastly more complex dynamics, and to phenomena not
captured by saturated models. Consequently, performance
analysis of multi-hop random-access networks remains
elusive. Based on the observation that emerging
multi-hop networks are typically dense and contain a
large number of nodes, we consider the mean-field limit
of multihop CSMA networks. We show that the equilibrium
point of the resulting initial value problem provides a
remarkably accurate approximation for the pre-limit
stochastic network in stationarity, even for sparse
networks with few nodes. Using these equilibrium points
we investigate the performance of linear networks under
different back-off rates, which govern how fast each
node transmits. We find the back-off rates which
provide the best end-to-end throughput and network
robustness, and use these insights to determine the
optimal back-off rates for general networks. We confirm
numerically the resulting performance gains compared to
the current practice of assigning all nodes the same
back-off rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cecchi:2017:SMF,
author = "F. Cecchi and S. C. Borst and J. S. H. van Leeuwaarden
and P. A. Whiting",
title = "Spatial Mean-Field Limits for Ultra-Dense
Random-Access Networks",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "123--136",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Random-access algorithms such as the CSMA protocol
provide a popular mechanism for distributed medium
access control in wireless networks. In
saturated-buffer scenarios the joint activity process
in such random-access networks has a product-form
stationary distribution which provides useful
throughput estimates for persistent traffic flows.
However, these results do not capture the relevant
performance metrics in unsaturated-buffer scenarios,
which in particular arise in an IoT context with highly
intermittent traffic sources. Mean-field analysis has
emerged as a powerful approach to obtain tractable
performance estimates in such situations, and is not
only mathematically convenient, but also relevant as
wireless networks grow larger and denser with the
emergence of IoT applications. A crucial requirement
for the classical mean-field framework to apply however
is that the node population can be partitioned into a
finite number of classes of statistically
indistinguishable nodes. The latter condition is a
severe restriction since nodes typically have different
locations and hence experience different interference
constraints. Motivated by the above observations, we
develop in the present paper a novel mean-field
methodology which does not rely on any exchangeability
property. Since the spatiotemporal evolution of the
network can no longer be described through a
finite-dimensional population process, we adopt a
measure-valued state description, and prove that the
latter converges to a deterministic limit as the
network grows large and dense. The limit process is
characterized in terms of a system of
partial-differential equations, which exhibit a
striking local-global-interaction and time scale
separation property. Specifically, the queueing
dynamics at any given node are only affected by the
global network state through a single parsimonious
quantity. The latter quantity corresponds to the
fraction of time that no activity occurs within the
interference range of that particular node in case of a
certain static spatial activation measure. Extensive
simulation experiments demonstrate that the solution of
the partial-differential equations yields remarkably
accurate approximations for the queue length
distributions and delay metrics, even when the number
of nodes is fairly moderate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Juneja:2017:CDU,
author = "S. Juneja and N. Shimkin",
title = "On the Computation of Dynamic User Equilibrium in the
Multiclass Transient Fluid Queue",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "137--142",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the arrival timing problem faced by
multiclass strategic customers to a single queue. The
customers' sensitivities to delay as well as their service
completion time preferences may be heterogeneous, and
the latter may vary nonlinearly with time. This
captures many realistic settings where customers have
preferences on when to arrive at a queue. We consider a
fluid setup, so each customer is a point in a continuum
and service rate is deterministic. This problem has
been well studied in the transportation literature as
the bottleneck model and the equilibrium customer
arrival profile is shown to uniquely exist using
intricate fixed point arguments. We develop a simple,
elegant and geometrically insightful iterative method
to arrive at this equilibrium profile, and provide an
equally simple uniqueness proof. Further, under
somewhat stringent assumptions, we arrive at the rate
of convergence of the proposed algorithm. The simple
geometric proof allows easy incorporation of useful
extensions --- to illustrate, we consider time varying
service rates where the equilibrium profile is easily
computed. Further, our results easily extend to the
case of customers balking when their costs are above a
class dependent threshold.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Telek:2017:RTD,
author = "Miklos Telek and Benny {Van Houdt}",
title = "Response Time Distribution of a Class of Limited
Processor Sharing Queues",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "143--155",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Processor sharing queues are often used to study the
performance of time-sharing systems. In such systems
the total service rate $ \mu (m) $ depends on the number of
jobs m present in the system and there is a limit
implemented, called the multi-programming level (MPL),
on the number of jobs k that can be served
simultaneously. Prior work showed that under highly
variable job sizes, setting the MPL k beyond the value
$ k^* = \arg \max_m \mu (m) $ may reduce the mean response time. In
order to study the impact of the MPL k on the response
time distribution, we analyse the MAP/PH/LPS-$ k(\mu (m)) $
queue. In such a queue jobs arrive according to a
Markovian arrival process (MAP), have phase-type (PH)
distributed sizes, at most k jobs are processed in
parallel and the total service rate depends on the
number of jobs being served. Jobs that arrive when
there are k or more jobs present are queued. We derive
an expression for the Laplace transform of the response
time distribution and numerically invert it to study
the impact of the MPL k. Numerical results illustrate
to what extent increasing k beyond $ k^* $ increases the
quantiles and tail probabilities of the response time
distribution. They further demonstrate that, for bursty
arrivals and larger MPL values k, having more variable
job sizes may reduce the mean response time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tay:2017:TES,
author = "Y. C. Tay",
title = "A technique to estimate a system's asymptotic delay
and throughput",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "156--159",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199549",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a computer system usually has two
asymptotic parameters: its minimum latency R0 when
workload is low, and its maximum throughput X1 when
workload is high. These parameters are important
because they can be used for, say, congestion control
in a network connection, or choosing between graphics
engines for a video game. The estimation of R0 and X1
is not straightforward: the hardware may be in a
blackbox, the software may interact in complicated
ways, and the estimates depend on the workload. This
short paper proposes a technique for using statistical
regression, and an equation from queueing analysis, to
estimate R0 and X1.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2017:ELS,
author = "Yingdong Lu and Mark S. Squillante and Chai Wah Wu",
title = "Epidemic-Like Stochastic Processes with Time-Varying
Behavior: Structural Properties and Asymptotic Limits",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "160--166",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The mathematical analysis of epidemic-like behavior
has a rich history, going all the way back to the
seminal work of Bernoulli in 1766 [5]. More recently,
mathematical models of epidemic-like behavior have
received considerable attention in the research
literature based on additional motivation from areas
such as communication and social networks,
cybersecurity systems, and financial markets; see,
e.g., [6]. The types of viral behaviors exhibited in
many of these applications tend to be characterized by
epidemic-like stochastic processes with time-varying
parameters [12, 13]. In this paper we consider variants
of the classical mathematical model of epidemic-like
behavior analyzed by Kurtz [8],[7, Chapter 11],
extending the analysis and results to first incorporate
time-varying behavior for the infection and cure rates
of the model and to then investigate structural
properties of the interactions between local (micro)
and global (macro) behaviors within the process.
Specifically, we start by formally presenting an
epidemic-like continuous-time, discrete-state
stochastic process in which each individual comprising
the population can be either in a non-infected state or
in an infected state, and where the rate at which the
non-infected population is infected and the rate at
which the infected population is cured are both
functions of time. We establish that, under general
assumptions on the time-varying processes and under a
mean-field scaling with respect to population size n,
the stochastic processes converge to a continuous-time,
continuous-state time-varying dynamical system. Then we
study the stationary behavior of both the original
stochastic process and the mean-field limiting
dynamical system, and verify that they, in fact, have
similar asymptotic behavior with respect to time. In
other words, we establish that the following diagram is
commutative.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
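Note: the mean-field correspondence described above can be visualized with a small simulation: a finite-population SIS-type process with time-varying infection and cure rates, run alongside the limiting ODE dy/dt = beta(t) y (1 - y) - gamma(t) y. The rate functions, population size, and horizon below are illustrative assumptions, not the paper's model.

# Minimal sketch (illustrative only): finite-population SIS-type epidemic with
# time-varying rates, compared against its mean-field ODE.
import numpy as np

rng = np.random.default_rng(2)
n = 2000                                   # population size (assumption)
beta = lambda t: 1.5 + 0.5 * np.sin(t)     # time-varying infection rate (assumption)
gamma = lambda t: 1.0                      # cure rate, constant here (assumption)
dt, T = 1e-3, 10.0

infected = n // 10                         # stochastic process (tau-leaping style)
y = infected / n                           # mean-field fraction
for k in range(int(T / dt)):
    t = k * dt
    s = n - infected
    new_inf = rng.poisson(beta(t) * infected * s / n * dt)
    new_cure = rng.poisson(gamma(t) * infected * dt)
    infected = min(n, max(0, infected + new_inf - new_cure))
    y += (beta(t) * y * (1 - y) - gamma(t) * y) * dt

print("stochastic fraction infected:", infected / n, " mean-field fraction:", y)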
@Article{Allybokus:2017:LBF,
author = "Zaid Allybokus and Konstantin Avrachenkov and
J{\'e}r{\'e}mie Leguay and Lorenzo Maggi",
title = "Lower Bounds for the Fair Resource Allocation
Problem",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "167--173",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The $ \alpha $-fair resource allocation problem has
received remarkable attention and has been studied in
numerous application fields. Several algorithms have
been proposed in the context of $ \alpha $-fair
resource sharing to distributively compute its value.
However, little work has been done on its structural
properties. In this work, we present a lower bound for
the optimal solution of the weighted $ \alpha $-fair
resource allocation problem and compare it with
existing propositions in the literature. Our
derivations rely on a localization property verified by
optimization problems with separable objective that
permit one to better exploit their local structures. We
give a local version of the well-known midpoint
domination axiom used to axiomatically build the Nash
Bargaining Solution (or proportionally fair resource
allocation problem). Moreover, we show how our lower
bound can improve the performances of a distributed
algorithm based on the Alternating Directions Method of
Multipliers (ADMM). The evaluation of the algorithm
shows that our lower bound can considerably reduce its
convergence time up to two orders of magnitude compared
to when the bound is not used at all or is simply
looser.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2017:ODU,
author = "Ruidi Chen and Ioannis Paschalidis",
title = "Outlier Detection Using Robust Optimization with
Uncertainty Sets Constructed from Risk Measures",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "174--179",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a robust optimization formulation for the
problem of outlier detection, with an uncertainty set
determined by the risk preference of the decision
maker. This connection between risk measures and
uncertainty sets is established in [3]. Inspired by this
methodology for uncertainty set construction under a
distortion risk measure, we propose a regularized
optimization problem with a finite number of
constraints to estimate a robust regression plane that
is less sensitive to outliers. An alternating
minimization scheme is applied to solve for the optimal
solution. We show that in three different scenarios
differentiated by the location of outliers, our Risk
Measure-based Robust Optimization (RMRO) approach
outperforms the traditionally used robust regression [12]
in terms of the estimation accuracy and detection
rates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2017:ORC,
author = "Jiankui Yang and David D. Yao and Heng-Qing Ye",
title = "On the Optimality of Reflection Control, with
Production-Inventory Applications",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "180--183",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the control of a Brownian motion (BM) with a
negative drift, so as to minimize a long-run average
cost objective. We show the optimality of a class of
reflection controls that prevent the BM from dropping
below some negative level r, by cancelling out from
time to time part of the negative drift; and this
optimality is established for any holding cost function
h(x) that is increasing in $ x \geq 0 $ and decreasing in
$ x \leq 0 $. Furthermore, we show the optimal reflection level
can be derived as the fixed point that equates the
long-run average cost to the holding cost. We also show
the asymptotic optimality of this reflection control
when it is applied to production-inventory systems
driven by discrete counting processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Greenberg:2017:AN,
author = "Albert Greenberg",
title = "{Azure} Networking",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "184--184",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To create the Azure cloud, we imagined and built a
massive cloud network, literally from the ground up.
Azure creates and sustains a virtual data center for
every tenant, under the tenant's control, meeting the
tenant's goals for high security, reliability, ease of
use, and performance. Building all this calls for
innovation across hardware and software. I'll discuss
key challenges and solutions, as well as demo new
functionality.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Le:2017:OEPb,
author = "Tan N. Le and Jie Liang and Zhenhua Liu and Ramesh K.
Sitaraman and Jayakrishnan Nair and Bong Jun Choi",
title = "Optimal Energy Procurement for Geo-distributed Data
Centers in Multi-timescale Electricity Markets",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "185--197",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multi-timescale electricity markets augment the
traditional electricity market by enabling consumers to
procure electricity in a futures market. Heavy power
consumers, such as cloud providers and data center
operators, can significantly benefit from
multi-timescale electricity markets by purchasing some
of the needed electricity ahead of time at cheaper
rates. However, the energy procurement strategy for
data centers in multi-timescale markets becomes a
challenging problem when real world dynamics, such as
spatial diversity of data centers and uncertainties of
renewable energy, IT workload, and electricity price,
are taken into account. In this paper, we develop
energy procurement algorithms for geo-distributed data
centers that utilize multi-timescale markets to
minimize the electricity procurement cost. We propose
two algorithms. The first algorithm provides provably
optimal cost minimization while the other achieves
near-optimal cost at a much lower computational cost.
We empirically evaluate our energy procurement
algorithms using real-world traces of renewable energy,
electricity prices, and workload demand. Our empirical
evaluations show that our proposed energy procurement
algorithms save up to 44\% of the total cost compared
to traditional algorithms that do not use
multi-timescale electricity markets or geographical
load balancing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cetinay:2017:ACF,
author = "Hale Cetinay and Saleh Soltan and Fernando A. Kuipers
and Gil Zussman and Piet {Van Mieghem}",
title = "Analyzing Cascading Failures in Power Grids under the
{AC} and {DC} Power Flow Models",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "198--203",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199559",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study cascading failures in power
grids under the nonlinear AC and linearized DC power
flow models. We numerically compare the evolution of
cascades after single line failures under the two flow
models in four test networks. The cascade simulations
demonstrate that the assumptions underlying the DC
model (e.g., ignoring power losses, reactive power
flows, and voltage magnitude variations) can lead to
inaccurate and overly optimistic cascade predictions.
Particularly, in large networks the DC model tends to
overestimate the yield (the ratio of the demand
supplied at the end of the cascade to the initial
demand). Hence, using the DC model for cascade
prediction may result in a misrepresentation of the
gravity of a cascade.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
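As background for the abstract above, the linearized DC power flow model
approximates the active power flow on a line between buses $i$ and $j$ by a
linear function of the voltage phase angles, ignoring losses, reactive power,
and voltage magnitude variations. A standard statement, in generic notation
not taken from the paper:

  \[
    P_{ij} \approx \frac{\theta_i - \theta_j}{x_{ij}},
    \qquad
    \sum_j P_{ij} = P_i \quad \text{for every bus } i,
  \]

where $x_{ij}$ is the line reactance and $P_i$ the net power injection at bus
$i$; the AC model retains the full nonlinear dependence on voltage magnitudes
and angles.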
@Article{Deiana:2017:FFM,
author = "Eleonora Deiana and Guy Latouche and Marie-Ange
Remiche",
title = "Fluid flow model for energy-aware server performance
evaluation",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "204--209",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We use a fluid flow model with reactive bounds to
analyse a data processing center with energy-aware
servers. The servers switch between four energy states
depending on the level of the buffer content and on
three reactive bounds. Every state consumes different
amounts of energy. We use a regenerative approach to
calculate the stationary distribution of the system and
the expected energy consumption.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitra:2017:MSI,
author = "Debasis Mitra and Qiong Wang",
title = "Management Strategies for Industrial Laboratories with
Knowledge Memory",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "210--216",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199561",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a simplified abstraction of an industrial
laboratory consisting of a two-stage network, Research
(R) and Development (D). Ideas and prototypes are
incubated in the R stage, the projects departing this
stage are assessed, and, if favorable, the project
proceeds to the D stage. Revenue is generated from the
sale of products/solutions that are outputs of the D
stage, and the sale and licensing of patents that are
generated at both stages. In our discrete time model,
in each time period the managers of the industrial
laboratory are given a constant amount of money to
invest in the two stages. The investments determine the
capacities of the stages based on linear unit costs. A
novel feature of the model is ``knowledge stocks'' for
the stages, which represent the accumulated know-how
from practicing research and development activities;
higher knowledge stock implies lower cost. The memory
in knowledge stocks makes current investment decisions
have long term impact on costs and profits. Three
strategies for profit maximization are investigated. In
myopic profit maximization we show the existence of
multiple equilibria and the phenomenon of state
entrapment in suboptimal regimes, which are absent in
the other strategies. Numerical results illustrate the
main features of the model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2017:OEP,
author = "Y. Lu and S. T. Maguluri and M. S. Squillante and T.
Suk and X. Wu",
title = "Optimal Energy Procurement for Geo-distributed Data
Centers in Multi-timescale Electricity Markets",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "217--223",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199563",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate a canonical input-queued switch
scheduling problem in which the objective is to
minimize the infinite horizon discounted queue length
under symmetric arrivals, for which we derive an
optimal scheduling policy and establish its theoretical
properties with respect to delay. We then compare via
simulation these theoretical properties of our optimal
policy with those of the well-known MaxWeight
scheduling algorithm in order to gain insights on the
delay optimality of the MaxWeight scheduling policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aktas:2017:SMD,
author = "Mehmet Fatih Aktas and Pei Peng and Emina Soljanin",
title = "Straggler Mitigation by Delayed Relaunch of Tasks",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "224--231",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199564",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2017:HTD,
author = "Weina Wang and Siva Theja Maguluri and R. Srikant and
Lei Ying",
title = "Heavy-Traffic Delay Insensitivity in Connection-Level
Models of Data Transfer with Proportionally Fair
Bandwidth Sharing",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "232--245",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by the stringent requirements on delay
performance in data center networks, we study a
connection-level model for bandwidth sharing among data
transfer flows, where file sizes have phase-type
distributions and proportionally fair bandwidth
allocation is used. We analyze the expected number of
files in steady-state by setting the steady-state drift
of an appropriately chosen Lyapunov function equal to
zero. We consider the heavy-traffic regime and obtain
asymptotically tight bounds on the expected number of
files in the system. Our results show that the expected
number of files under proportionally fair bandwidth
allocation is insensitive in heavy traffic to file size
distributions, thus complementing the diffusion
approximation result of Vlasiou et al. [20].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
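For readers unfamiliar with the term, the proportionally fair bandwidth
allocation used in the connection-level model above assigns a rate $x_r$ to
each of the $n_r$ flows on route $r$ by solving the concave program below
(the standard Massouli{\'e}--Roberts form; the notation is generic and not
taken from the paper):

  \[
    \max_{x \ge 0} \; \sum_r n_r \log x_r
    \quad \text{s.t.} \quad
    \sum_{r :\, l \in r} n_r x_r \le c_l \quad \text{for every link } l,
  \]

where $c_l$ is the capacity of link $l$.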
@Article{Casale:2017:PEJ,
author = "Giuliano Casale and Giuseppe Serazzi and Lulai Zhu",
title = "Performance Evaluation with {Java} Modelling Tools: a
Hands-on Introduction",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "246--247",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/java2010.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The goal of this tutorial is to introduce Java
Modelling Tools (JMT), an open source framework for
discrete-event simulation and analysis of queueing
networks, both product-form and extended, generalized
stochastic Petri nets (GSPNs), and queueing Petri nets
(QPNs). Thanks to a user-friendly graphical interface,
JMT is well-suited to teach performance modeling in
academia and to help research students familiarize with
classic modeling formalisms used in performance
evaluation. The tutorial introduces established and
novel features of the JMT suite and illustrates them on
case studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nannicini:2017:SMD,
author = "Giacomo Nannicini",
title = "Straggler Mitigation by Delayed Relaunch of Tasks",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "248--248",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maguluri:2017:DMH,
author = "Siva Theja Maguluri and R. Srikant and Weina Wang",
title = "The Drift Method for Heavy Traffic Limits, with
Applications in Data Centers and Networks",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "249--249",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Heavy traffic limits of queueing systems have been
studied in the literature using fluid and diffusion
limits. Recently, a new method called the 'Drift
Method' has been developed to study these limits. In
the drift method, a function of the queue lengths is
picked and its drift is set to zero in steady-state, to
obtain bounds on the steady-state queue lengths that
are tight in the heavy-traffic limit. The key is to
establish an appropriate notion of state-space collapse
in terms of steady-state moments of weighted queue
length differences, and use this state-space collapse
result when setting the drift equal to zero. These
moment bounds involved in state space collapse are also
obtained by drift arguments similar to the well-known
Foster-Lyapunov theorem. We will apply the methodology
to study routing, scheduling, and other resource
allocation problems that arise in data centers and
cloud computing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
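A minimal single-queue illustration of the drift argument sketched above (a
generic textbook-style calculation, not the authors' derivation): for
$ q(t+1) = [q(t) + a(t) - s(t)]^+ $ with i.i.d. arrivals $a(t)$ of mean
$\lambda$ and offered service $s(t)$ of mean $\mu > \lambda$, write
$ q(t+1) = q(t) + a(t) - s(t) + u(t) $ with unused service $u(t)$ satisfying
$ u(t)\, q(t+1) = 0 $. Setting the steady-state drift of $ V(q) = q^2 $ to
zero gives

  \[
    0 = 2\, \mathbb{E}[q]\, (\lambda - \mu)
        + \mathbb{E}[(a - s)^2] - \mathbb{E}[u^2],
    \qquad \text{hence} \qquad
    \mathbb{E}[q] \le \frac{\mathbb{E}[(a - s)^2]}{2 (\mu - \lambda)},
  \]

and for bounded service the neglected term $ \mathbb{E}[u^2] $ vanishes as
$ \lambda \uparrow \mu $, which is the sense in which such bounds become
tight in heavy traffic.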
@Article{Braverman:2017:SMS,
author = "Anton Braverman and Jim Dai",
title = "{Stein}'s Method for Steady-State Approximations:
Error Bounds and Engineering Solutions",
journal = j-SIGMETRICS,
volume = "45",
number = "3",
pages = "250--250",
month = dec,
year = "2017",
CODEN = "????",
DOI = "https://doi.org/10.1145/3199524.3199570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Apr 10 06:31:40 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Heavy traffic limits of queueing systems have been
studied in the literature using fluid and diffusion
limits. Recently, a new method called the 'Drift
Method' has been developed to study these limits. In
the drift method, a function of the queue lengths is
picked and its drift is set to zero in steady-state, to
obtain bounds on the steady-state queue lengths that
are tight in the heavy-traffic limit. The key is to
establish an appropriate notion of state-space collapse
in terms of steady-state moments of weighted queue
length differences, and use this state-space collapse
result when setting the drift equal to zero. These
moment bounds involved in state space collapse are also
obtained by drift arguments similar to the well-known
Foster-Lyapunov theorem. We will apply the methodology
to study routing, scheduling, and other resource
allocation problems that arise in data centers and
cloud computing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Avrachenkov:2018:EBM,
author = "Konstantin Avrachenkov and Tejas Bodas",
title = "On the equivalence between multiclass processor
sharing and random order scheduling policies",
journal = j-SIGMETRICS,
volume = "45",
number = "4",
pages = "2--6",
month = mar,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3273996.3273998",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Sep 8 07:47:02 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consider a single server system serving a multiclass
population. Some popular scheduling policies for such
system are the discriminatory processor sharing (DPS),
discriminatory random order service (DROS), generalized
processor sharing (GPS) and weighted fair queueing
(WFQ). In this paper, we propose two classes of
policies, namely MPS (multiclass processor sharing) and
MROS (multiclass random order service), that generalize
the four policies mentioned above. For the special case
when the multiclass population arrives according to
Poisson processes and has independent and exponential
service requirements with parameter $ \mu $, we show that the
tail of the sojourn time distribution for a class i
customer in a system with the MPS policy is a constant
multiple of the tail of the waiting time distribution
of a class i customer in a system with the MROS policy.
This result implies that for a class i customer, the
tail of the sojourn time distribution in a system with
the DPS (GPS) scheduling policy is a constant multiple
of the tail of the waiting time distribution in a
system with the DROS (respectively WFQ) policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pal:2018:ICS,
author = "Ranjan Pal and Leana Golubchik and Konstantinos
Psounis and Pan Hui",
title = "Improving Cyber-Security via Profitable Insurance
Markets",
journal = j-SIGMETRICS,
volume = "45",
number = "4",
pages = "7--15",
month = mar,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3273996.3273999",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Sep 8 07:47:02 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent work in security has illustrated that solutions
aimed at detection and elimination of security threats
alone are unlikely to result in a robust cyberspace. As
an orthogonal approach to mitigating security problems,
some researchers have pursued the use of
cyber-insurance as a suitable risk management
technique. In this regard, a recent work by the authors
in [1] has proposed efficient monopoly cyber-insurance
markets that maximize the social welfare of users in a
communication network by premium-discriminating them.
However, that work has a major drawback: the insurer is
unable to make a strictly positive profit in
expectation, which in turn might lead to unsuccessful
insurance markets. In this paper, we provide a method
(based on the model in [1]) to overcome this drawback
for the risk-averse premium discriminating monopoly
cyber-insurer, and prove it in theory. More
specifically, we propose a non-regulatory mechanism to
allow monopoly cyber-insurers to make strictly positive
profit in expectation. To investigate the general
effectiveness of our mechanism beyond a monopoly
setting with full coverage, we conduct numerical
experiments (comparing social welfare at market
equilibrium) on (a) practical Internet-scale network
topologies that are formed by users who are free to
decide for themselves whether they want to purchase
insurance or not, (b) settings of perfect and imperfect
market competition, and (c) scenarios with partial
insurance coverage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Misra:2018:SDP,
author = "Vishal Misra",
title = "Session details: Performance Evaluation Review",
journal = j-SIGMETRICS,
volume = "45",
number = "4",
pages = "",
month = mar,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3274475",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Sep 8 07:47:02 MDT 2018",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dai:2018:SSA,
author = "Jim Dai",
title = "Steady-state Approximations: Achievement Lecture",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "1--1",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219618",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Diffusion models and mean-field models have been used
to approximate many stochastic dynamical systems. A
functional strong law of large numbers or a functional
central limit theorem justifies such an approximation.
Such a result, however, does not justify the
convergence of the equilibria of pre-limit systems to
the equilibrium of a limit system. In this talk, I will
touch on three recently developed methods for
justifying equilibrium convergence in the setting of
bandwidth sharing networks and multiclass queueing
networks, with a focus on the moment generating function
method and state-space collapse. Based on joint
works with Anton Braverman, Chang Cao, Masakiyo
Miyazawa, and Xiangyu Zhang.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Banerjee:2018:SDC,
author = "Siddhartha Banerjee and Yash Kanoria and Pengyu Qian",
title = "State Dependent Control of Closed Queueing Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "2--4",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219619",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the design of state dependent control for a
closed queueing network model, inspired by shared
transportation systems such as ridesharing. In
particular, we focus on the design of assignment
policies, wherein the platform can choose which supply
unit to dispatch to meet an incoming customer request.
The supply unit subsequently becomes available at the
destination after dropping the customer. We consider
the proportion of dropped demand in steady state as the
performance measure. We propose a family of simple and
explicit state dependent policies called Scaled
MaxWeight (SMW) policies and prove that under the
complete resource pooling (CRP) condition (analogous to
a strict version of Hall's condition for bipartite
matchings), any SMW policy induces an exponential decay
of demand-dropping probability as the number of supply
units scales to infinity. Furthermore, we show that
there is an SMW policy that achieves the optimal
exponent among all assignment policies, and
analytically specify this policy in terms of the matrix
of customer-request arrival rates. The optimal SMW
policy protects structurally under-supplied
locations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fanti:2018:DLC,
author = "Giulia Fanti and Shaileshh Bojja Venkatakrishnan and
Surya Bakshi and Bradley Denby and Shruti Bhargava and
Andrew Miller and Pramod Viswanath",
title = "{Dandelion++}: Lightweight Cryptocurrency Networking
with Formal Anonymity Guarantees",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "5--7",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219620",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent work has demonstrated significant anonymity
vulnerabilities in Bitcoin's networking stack. In
particular, the current mechanism for broadcasting
Bitcoin transactions allows third-party observers to
link transactions to the IP addresses that originated
them. This lays the groundwork for low-cost,
large-scale deanonymization attacks. In this work, we
present Dandelion++, a first-principles defense against
large-scale deanonymization attacks with near-optimal
information-theoretic guarantees. Dandelion++ builds
upon a recent proposal called Dandelion that exhibited
similar goals. However, in this paper, we highlight
some simplifying assumptions made in Dandelion, and
show how they can lead to serious deanonymization
attacks when violated. In contrast, Dandelion++ defends
against stronger adversaries that are allowed to
disobey protocol. Dandelion++ is lightweight, scalable,
and completely interoperable with the existing Bitcoin
network. We evaluate it through experiments on Bitcoin's
mainnet (i.e., the live Bitcoin network) to demonstrate
its interoperability and low broadcast latency
overhead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buchnik:2018:BGD,
author = "Eliav Buchnik and Edith Cohen",
title = "Bootstrapped Graph Diffusions: Exposing the Power of
Nonlinearity",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "8--10",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219621",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Graph-based semi-supervised learning (SSL) algorithms
predict labels for all nodes based on provided labels
of a small set of seed nodes. Classic methods capture
the graph structure through some underlying diffusion
process that propagates through the graph edges.
Spectral diffusion, which includes personalized page
rank and label propagation, propagates through random
walks. Social diffusion propagates through shortest
paths. These diffusions are linear in the sense of not
distinguishing between contributions of few ``strong''
relations or many ``weak'' relations. Recent methods
such as node embeddings and graph convolutional
networks (GCN) attained significant gains in quality
for SSL tasks. These methods vary on how the graph
structure, seed label information, and other features
are used, but do share a common thread of nonlinearity
that suppresses weak relations and re-enforces stronger
ones. Aiming for quality gain with more scalable
methods, we revisit classic linear diffusion methods
and place them in a self-training framework. The
resulting bootstrapped diffusions are nonlinear in that
they re-enforce stronger relations, as with the more
complex methods. Surprisingly, we observe that SSL with
bootstrapped diffusions not only significantly improves
over the respective non-bootstrapped baselines but also
outperform state-of-the-art SSL methods. Moreover,
since the self-training wrapper retains the scalability
of the base method, we obtain both higher quality and
better scalability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
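The self-training wrapper described above can be sketched in a few lines. The
following rough Python illustration combines a personalized-PageRank-style
label propagation with a confidence-based seed-expansion loop; the graph,
thresholds, and parameter values are invented for the example and this is not
the authors' code or their settings.

import numpy as np

def label_propagation(A, seed_labels, n_classes, n_iter=50, alpha=0.85):
    """One linear diffusion pass: personalized-PageRank-style propagation."""
    n = A.shape[0]
    deg = np.maximum(A.sum(axis=1), 1.0)
    P = A / deg[:, None]                      # row-stochastic transition matrix
    Y = np.zeros((n, n_classes))
    for node, lab in seed_labels.items():     # one-hot seed matrix
        Y[node, lab] = 1.0
    F = Y.copy()
    for _ in range(n_iter):
        F = alpha * (P @ F) + (1.0 - alpha) * Y
    return F                                  # rows = per-class scores

def bootstrapped_diffusion(A, seed_labels, n_classes, rounds=3, top_frac=0.1):
    """Self-training wrapper: after each pass, the most confident non-seed
    predictions are promoted to seed labels (the nonlinearity)."""
    seeds = dict(seed_labels)
    for _ in range(rounds):
        F = label_propagation(A, seeds, n_classes)
        conf = F.max(axis=1)
        pred = F.argmax(axis=1)
        candidates = [v for v in np.argsort(-conf) if v not in seeds]
        for v in candidates[: max(1, int(top_frac * len(candidates)))]:
            seeds[v] = int(pred[v])           # promote confident predictions
    return label_propagation(A, seeds, n_classes).argmax(axis=1)

if __name__ == "__main__":
    # Tiny toy graph: two 4-node cliques joined by a single edge.
    A = np.zeros((8, 8))
    for block in (range(0, 4), range(4, 8)):
        for i in block:
            for j in block:
                if i != j:
                    A[i, j] = 1.0
    A[3, 4] = A[4, 3] = 1.0
    print(bootstrapped_diffusion(A, {0: 0, 7: 1}, n_classes=2))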
@Article{Hoffmann:2018:CUC,
author = "Jessica Hoffmann and Constantine Caramanis",
title = "The Cost of Uncertainty in Curing Epidemics",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "11--13",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219622",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Epidemic models are used across biological and social
sciences, engineering, and computer science, and have
had important impact in the study of the dynamics of
human disease and computer viruses, but also trends
rumors, viral videos, and most recently the spread of
fake news on social networks. In this paper, we focus
on epidemics propagating on a graph, as introduced by
the seminal paper [5]. In particular, we consider
so-called SI models (see below for a precise
definition) where an infected node can only propagate
the infection to its non-infected neighbor, as opposed
to the fully mixed models considered in the early
literature. This graph-based approach provides a more
realistic model, in which the spread of the epidemic is
determined by the connectivity of the graph, and
accordingly some nodes may play a larger role than
others in the spread of the infection.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
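For context, the SI (susceptible-infected) model on a graph referred to above
is the simplest networked epidemic model: each node is either susceptible or
infected, an infected node transmits the infection across each incident edge
to a susceptible neighbor at some rate $\beta$, and nodes never recover, so
the infection spreads only along the edges of the graph. In the standard
continuous-time formulation (the precise variant analyzed in the paper may
differ in details),

  \[
    \Pr\{\text{$v$ infected in } [t, t + dt) \mid \text{$v$ susceptible at } t\}
    = \beta \, \bigl|\{u : (u, v) \in E,\ u \text{ infected at } t\}\bigr| \, dt .
  \]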
@Article{Sejourne:2018:PFM,
author = "Thibault S{\'e}journ{\'e} and Samitha Samaranayake and
Siddhartha Banerjee",
title = "The Price of Fragmentation in Mobility-on-Demand
Services",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "14--16",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219623",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mobility-on-Demand platforms are a fast growing
component of the urban transit ecosystem. Though a
growing literature addresses the question of how to
make individual MoD platforms more efficient, less is
known about the cost of market fragmentation, i.e., the
impact on overall welfare due to splitting demand
between multiple independent platforms. Our work aims
to quantify how much platform fragmentation degrades
the efficiency of the system. In particular, we focus
on a setting where demand is exogenously split between
multiple platforms, and study the increase in supply
rebalancing costs incurred by each platform to meet
this demand, vis-a-vis the cost incurred by a
centralized platform serving the aggregate demand. We
show that, under a large-market scaling, this
Price-of-Fragmentation undergoes a phase transition,
wherein, depending on the nature of the exogenous
demand, the additional cost due to fragmentation either
vanishes or grows unbounded. We provide conditions that
characterize which regime applies to any given system,
and discuss implications of these findings on how such
platforms should be regulated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Amjad:2018:CDE,
author = "Muhammad J. Amjad and Devavrat Shah",
title = "Censored Demand Estimation in Retail",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "17--19",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219624",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, the question of interest is estimating
true demand of a product at a given store location and
time period in the retail environment based on a single
noisy and potentially censored observation. To address
this question, we introduce a non-parametric framework
to make inference from multiple time series. Somewhat
surprisingly, we establish that the algorithm
introduced for the purpose of ``matrix completion'' can
be used to solve the relevant inference problem.
Specifically, using the Universal Singular Value
Thresholding (USVT) algorithm [2], we show that our
estimator is consistent: the average mean squared error
of the estimated average demand with respect to the
true average demand goes to 0 as the number of store
locations and time intervals increase to $ \infty $. We
establish naturally appealing properties of the
resulting estimator both analytically as well as
through a sequence of instructive simulations. Using a
real dataset in retail (Walmart), we argue for the
practical relevance of our approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
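The USVT estimator [2] referenced above admits a short sketch: zero-fill the
unobserved entries, compute an SVD, keep only singular values above a
universal threshold, rescale by the estimated observation probability, and
clip back to the known value range. The rough Python illustration below uses
synthetic rank-one ``demand'' data; the threshold constant and the value
range are assumptions, and this is not the authors' implementation.

import numpy as np

def usvt(Y, mask, lo=0.0, hi=1.0, eta=0.01):
    """Estimate a low-rank mean matrix from partially observed entries.

    Y    : observed matrix with arbitrary values where mask == 0
    mask : 1 where Y is observed, 0 where it is missing/censored
    """
    n, m = Y.shape
    p_hat = max(mask.mean(), 1.0 / (n * m))   # estimated observation probability
    X = np.where(mask > 0, Y, 0.0)            # fill unobserved entries with 0
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    thresh = (2.0 + eta) * np.sqrt(max(n, m) * p_hat)
    keep = s >= thresh                        # keep only large singular values
    W = (U[:, keep] * s[keep]) @ Vt[keep, :]
    M_hat = W / p_hat                         # rescale for missingness
    return np.clip(M_hat, lo, hi)             # project onto the known range

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    true_mean = np.outer(rng.random(50), rng.random(40))   # rank-1 "demand"
    mask = (rng.random(true_mean.shape) < 0.5).astype(float)
    noisy = np.clip(true_mean + 0.05 * rng.standard_normal(true_mean.shape), 0, 1)
    est = usvt(noisy * mask, mask, lo=0.0, hi=1.0)
    print("mean squared error:", np.mean((est - true_mean) ** 2))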
@Article{Martonosi:2018:NMM,
author = "Margaret Martonosi",
title = "New Metrics and Models for a Post-{ISA} Era: Managing
Complexity and Scaling Performance in Heterogeneous
Parallelism and {Internet-of-Things}",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "20--20",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219625",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Pushed by both application and technology trends,
today's computer systems employ unprecedented levels of
heterogeneity, parallelism, and complexity as they seek
to extend performance scaling and support new
application domains. From datacenters to
Internet-of-Things (IoT) devices, these scaling gains
come at the expense of degraded hardware-software
abstraction layers, increased complexity at the
hardware-software interface, and increased challenges
for software reliability, interoperability, and
performance portability. This talk will explore how new
metrics, models, and analysis techniques can be
effective in this ``Post-ISA'' era of shifting
abstractions. The talk will cover hardware and software
design opportunities, methods for formal verification,
and a look into the implications on technologies like
IoT.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borst:2018:DSM,
author = "Sem Borst and Martin Zubeldia",
title = "Delay Scaling in Many-Sources Wireless Networks
without Queue State Information",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "21--23",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219626",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We examine a canonical scenario where several wireless
data sources generate sporadic delay-sensitive messages
that need to be transmitted to a common access point.
The access point operates in a time-slotted fashion,
and can instruct the various sources in each slot with
what probability to transmit a message, if they have
any. When several sources transmit simultaneously, the
access point can detect a collision, but is unable to
infer the identities of the sources involved. While the
access point can use the channel activity observations
to obtain estimates of the queue states at the various
sources, it does not have any explicit queue length
information otherwise. We explore the achievable delay
performance in a regime where the number of sources n
grows large while the relative load remains fixed. We
establish that, under any medium access algorithm
without queue state information, the average delay must
be at least of the order of n slots when the load
exceeds some threshold $ \lambda^* < 1 $. This demonstrates
that bounded delay can only be achieved if a positive
fraction of the system capacity is sacrificed.
Furthermore, we introduce a scalable Two-Phase
algorithm which achieves a delay upper bounded
uniformly in n when the load is below $ e^{-1} $, and a delay
of the order of n slots when the load is between $ e^{-1} $
and 1. Additionally, this algorithm provides robustness
against correlated source activity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berger:2018:PBO,
author = "Daniel S. Berger and Nathan Beckmann and Mor
Harchol-Balter",
title = "Practical Bounds on Optimal Caching with Variable
Object Sizes",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "24--26",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219627",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many recent caching systems aim to improve miss
ratios, but there is no good sense among practitioners
of how much further miss ratios can be improved. In
other words, should the systems community continue
working on this problem? Currently, there is no
principled answer to this question. In practice, object
sizes often vary by several orders of magnitude, where
computing the optimal miss ratio (OPT) is known to be
NP-hard. The few known results on caching with variable
object sizes provide very weak bounds and are
impractical to compute on traces of realistic length.
We propose a new method to compute upper and lower
bounds on OPT. Our key insight is to represent caching
as a min-cost flow problem, hence we call our method
the flow-based offline optimal (FOO). We prove that,
under simple independence assumptions, FOO's bounds
become tight as the number of objects goes to infinity.
Indeed, FOO's error over 10M requests of production CDN
and storage traces is negligible: at most 0.3\%. FOO
thus reveals, for the first time, the limits of caching
with variable object sizes. While FOO is very accurate,
it is computationally impractical on traces with
hundreds of millions of requests. We therefore extend
FOO to obtain more efficient bounds on OPT, which we
call practical flow-based offline optimal (PFOO). We
evaluate PFOO on several full production traces and use
it to compare OPT to prior online policies. This
analysis shows that current caching systems are in fact
still far from optimal, suffering 11--43\% more cache
misses than OPT, whereas the best prior offline bounds
suggest that there is essentially no room for
improvement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
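As context for ``OPT'' above: when all objects have the same size, the
offline optimum is computed exactly by the classical furthest-in-future
(Belady) rule, and the hardness discussed in the abstract comes from
variable object sizes. The sketch below implements only the unit-size rule
on an invented toy trace; it is an illustrative baseline, not FOO or PFOO.

def belady_miss_ratio(trace, cache_slots):
    """Offline optimal miss ratio for unit-size objects (furthest-in-future)."""
    # Precompute, for each position, the next occurrence of the same object.
    next_use = [float("inf")] * len(trace)
    last_seen = {}
    for i in range(len(trace) - 1, -1, -1):
        obj = trace[i]
        next_use[i] = last_seen.get(obj, float("inf"))
        last_seen[obj] = i
    cache = {}          # object -> position of its next use
    misses = 0
    for i, obj in enumerate(trace):
        if obj in cache:
            cache[obj] = next_use[i]
        else:
            misses += 1
            if len(cache) >= cache_slots:
                victim = max(cache, key=cache.get)   # furthest next use in cache
                if cache[victim] <= next_use[i]:
                    continue          # incoming object is used furthest: bypass
                del cache[victim]
            cache[obj] = next_use[i]
    return misses / len(trace)

if __name__ == "__main__":
    trace = ["a", "b", "c", "a", "b", "d", "a", "c", "b", "d"]
    print(belady_miss_ratio(trace, cache_slots=2))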
@Article{Tan:2018:RPS,
author = "Jian Tan and Guocong Quan and Kaiyi Ji and Ness
Shroff",
title = "On Resource Pooling and Separation for {LRU} Caching",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "27--27",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219628",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Caching systems using the Least Recently Used (LRU)
principle have now become ubiquitous. A fundamental
question for these systems is whether the cache space
should be pooled together or divided to serve multiple
flows of data item requests in order to minimize the
miss probabilities. In this paper, we show that there
is no straight yes-or-no answer to this question, and
that the answer depends on complex combinations of critical factors,
including, e.g., request rates, overlapped data items
across different request flows, data item popularities
and their sizes. To this end, we characterize the
performance of multiple flows of data item requests
under resource pooling and separation when the cache
size is large. Analytically we show that it is
asymptotically optimal to jointly serve multiple flows
if their data item sizes and popularity distributions
are similar, and their arrival rates do not differ
significantly; the self-organizing property of LRU
caching automatically optimizes the resource allocation
among them asymptotically. Otherwise, separating these
flows could be better, e.g., when data sizes vary
significantly. We also quantify critical points beyond
which resource pooling is better than separation for
each of the flows when the overlapped data items exceed
certain levels. These results provide new insights on
the performance of caching systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2018:ORO,
author = "Lin Yang and Wing Shing Wong and Mohammad H.
Hajiesmaili",
title = "An Optimal Randomized Online Algorithm for {QoS}
Buffer Management",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "28--30",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219629",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The QoS buffer management problem, with significant
and diverse computer applications, e.g., in online
cloud resource allocation problems, is a classic online
admission control problem in the presence of resource
constraints. In its basic setting, packets with
different values, arrive in online fashion to a
switching node with limited buffer size. Then, the
switch needs to make an immediate decision to either
admit or reject the incoming packet based on the value
of the packet and its buffer availability. The
objective is to maximize the cumulative profit of the
admitted packets, while respecting the buffer
constraint. Even though the QoS buffer management
problem was proposed more than a decade ago, no optimal
online solution has been proposed in the literature.
This paper proposes an optimal randomized online
algorithm for this problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liang:2018:MQL,
author = "Qingkai Liang and Eytan Modiano",
title = "Minimizing Queue Length Regret Under Adversarial
Network Models",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "31--32",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219630",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic models have been dominant in network
optimization theory for over two decades, due to their
analytical tractability. However, these models fail to
capture non-stationary or even adversarial network
dynamics which are of increasing importance for
modeling the behavior of networks under malicious
attacks or characterizing short-term transient
behavior. In this paper, we focus on minimizing queue
length regret under adversarial network models, which
measures the finite-time queue length difference
between a causal policy and an ``oracle'' that knows
the future. Two adversarial network models are
developed to characterize the adversary's behavior. We
provide lower bounds on queue length regret under these
adversary models and analyze the performance of two
control policies (i.e., the MaxWeight policy and the
Tracking Algorithm). We further characterize the
stability region under adversarial network models, and
show that both the MaxWeight policy and the Tracking
Algorithm are throughput-optimal even in adversarial
settings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
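The MaxWeight policy analyzed above is easy to state concretely: in each
slot, serve the feasible schedule with the largest total queue length. The
small Python sketch below runs it on an input-queued switch under a toy
stochastic (not adversarial) Bernoulli arrival process; the port count, load,
and horizon are arbitrary illustration values.

import numpy as np
from scipy.optimize import linear_sum_assignment

def maxweight_schedule(Q):
    """Return the max-weight input/output matching for queue matrix Q (N x N)."""
    rows, cols = linear_sum_assignment(Q, maximize=True)
    S = np.zeros_like(Q)
    S[rows, cols] = 1            # S[i, j] = 1 iff input i serves output j
    return S

def simulate(n_ports=4, load=0.9, horizon=10_000, seed=0):
    rng = np.random.default_rng(seed)
    rate = load / n_ports        # uniform Bernoulli arrival rate per (i, j) pair
    Q = np.zeros((n_ports, n_ports))
    total = 0.0
    for _ in range(horizon):
        S = maxweight_schedule(Q)
        arrivals = (rng.random((n_ports, n_ports)) < rate).astype(float)
        Q = np.maximum(Q + arrivals - S, 0.0)
        total += Q.sum()
    return total / horizon       # time-averaged total queue length

if __name__ == "__main__":
    print("average total backlog:", simulate())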
@Article{Freeman:2018:DPS,
author = "Rupert Freeman and Seyed Majid Zahedi and Vincent
Conitzer and Benjamin C. Lee",
title = "Dynamic Proportional Sharing: a Game-Theoretic
Approach",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "33--35",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219631",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sharing computational resources amortizes cost and
improves utilization and efficiency. When agents pool
their resources, each becomes entitled to a portion of
the shared pool. Static allocations in each round can
guarantee entitlements and are strategy-proof, but
efficiency suffers because allocations do not reflect
variations in agents' demands for resources across
rounds. Dynamic allocation mechanisms assign resources
to agents across multiple rounds while guaranteeing
agents their entitlements. Designing dynamic mechanisms
is challenging, however, when agents are strategic and
can benefit by misreporting their demands for
resources. In this paper, we show that dynamic
allocation mechanisms based on max-min fail to
guarantee entitlements, strategy-proofness or both. We
propose the flexible lending (FL) mechanism and show
that it satisfies strategy-proofness and guarantees at
least half of the utility from static allocations while
providing an asymptotic efficiency guarantee. Our
simulations with real and synthetic data show that the
performance of the flexible lending mechanism is
comparable to that of state-of-the-art mechanisms,
providing agents with at least 0.98x, and on average
15x, of their utility from static allocations. Finally,
we propose the $T$-period mechanism and prove that it
satisfies strategy-proofness and guarantees
entitlements for $ T \le 2 $.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scully:2018:SOC,
author = "Ziv Scully and Mor Harchol-Balter and Alan
Scheller-Wolf",
title = "{SOAP}: One Clean Analysis of All Age-Based Scheduling
Policies",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "36--38",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219632",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an extremely broad class of M/G/1
scheduling policies called SOAP: Schedule Ordered by
Age-based Priority. The SOAP policies include almost
all scheduling policies in the literature as well as an
infinite number of variants which have never been
analyzed, or maybe not even conceived. SOAP policies
range from classic policies, like first-come,
first-serve (FCFS), foreground-background (FB),
class-based priority, and shortest remaining processing
time (SRPT); to much more complicated scheduling rules,
such as the famously complex Gittins index policy and
other policies in which a job's priority changes
arbitrarily with its age. While the response time of
policies in the former category is well understood,
policies in the latter category have resisted response
time analysis. We present a universal analysis of all
SOAP policies, deriving the mean and Laplace--Stieltjes
transform of response time. The full version of this
work appears in POMACS [Scully et al., 2018].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anand:2018:WIB,
author = "Arjun Anand and Gustavo de Veciana",
title = "A {Whittle}'s Index Based Approach for {QoE}
Optimization in Wireless Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "39--39",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219633",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The design of schedulers to optimize heterogeneous
users' Quality of Experience (QoE) remains a
challenging and important problem for wireless systems.
Our paper ([1]) explores three inter-related aspects of
this problem: (1) non-linear relationships between a
user's QoE and flow delays; (2) managing load dependent
QoE trade-offs among heterogeneous application classes;
and (3), striking a good balance between opportunistic
scheduling and greedy QoE optimization. To that end we
study downlink schedulers which minimize the expected
cost modeled by convex functions of flow delays for
users with heterogeneous channel rate variations. The
essential features of this challenging problem are
modeled as a Markov Decision Process to which we apply
Whittle's relaxation, which in turn is shown to be
indexable. Based on the Whittle's relaxation we develop
a new scheduling policy, Opportunistic Delay Based
Index Policy (ODIP). We then prove various structural
properties for ODIP which result in closed form
expressions for Whittle's indices under different
scheduler scenarios. Using extensive simulations we
show that the ODIP scheduler provides a robust means to
realize complex QoE trade-offs for a range of system
loads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kleinberg:2018:ITO,
author = "Jon Kleinberg",
title = "Inherent Trade-Offs in Algorithmic Fairness",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "40--40",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219634",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent discussion in both the academic literature and
the public sphere about classification by algorithms
has involved tension between competing notions of what
it means for such a classification to be fair to
different groups. We consider several of the key
fairness conditions that lie at the heart of these
debates, and discuss recent research establishing
inherent trade-offs between these conditions. We also
consider a variety of methods for promoting fairness
and related notions for classification and selection
problems that involve sets rather than just
individuals.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2018:OAO,
author = "Lin Yang and Lei Deng and Mohammad H. Hajiesmaili and
Cheng Tan and Wing Shing Wong",
title = "An Optimal Algorithm for Online Non-Convex Learning",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "41--43",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219635",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In many online learning paradigms, convexity plays a
central role in the derivation and analysis of online
learning algorithms. The results, however, fail to be
extended to the non-convex settings, which are
necessitated by tons of recent applications. The Online
Non-Convex Learning problem generalizes the classic
Online Convex Optimization framework by relaxing the
convexity assumption on the cost function (to a
Lipschitz continuous function) and the decision set.
The state-of-the-art result for online non-convex
learning demonstrates that the classic Hedge algorithm
attains a sublinear regret of $ O(\sqrt T \log T) $. The
regret lower bound for online convex optimization,
however, is $ \Omega (\sqrt T) $, and to the best of our
knowledge, there is no result in the context of the
online non-convex learning problem achieving the same
bound. This paper proposes the Online Recursive
Weighting algorithm with regret of $ O(\sqrt T) $,
matching the tight regret lower bound for the online
convex optimization problem, and fills the regret gap between the
state-of-the-art results in the online convex and
non-convex optimization problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
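The classic Hedge (exponential weights) algorithm cited in the abstract can
be sketched over a finite set of candidate decisions, e.g. a discretization
of a non-convex decision set. The Python illustration below is the generic
textbook algorithm with a made-up loss function and grid; it is not the
paper's Online Recursive Weighting algorithm.

import numpy as np

def hedge(losses_per_round, eta):
    """losses_per_round: iterable of length-K arrays with losses in [0, 1]."""
    total_loss = 0.0
    log_w = None
    for losses in losses_per_round:
        if log_w is None:
            log_w = np.zeros(len(losses))
        p = np.exp(log_w - log_w.max())
        p /= p.sum()                       # play distribution over the K actions
        total_loss += float(p @ losses)    # expected loss this round
        log_w -= eta * losses              # multiplicative-weights update
    return total_loss

if __name__ == "__main__":
    rng = np.random.default_rng(1)
    grid = np.linspace(0.0, 1.0, 101)      # finite decision set in [0, 1]
    T = 2000
    eta = np.sqrt(np.log(len(grid)) / T)   # standard learning-rate choice
    # Non-convex per-round loss: a randomly shifted, clipped sinusoid.
    rounds = (np.clip(0.5 + 0.5 * np.sin(8 * grid + rng.normal()), 0, 1)
              for _ in range(T))
    print("average per-round loss:", hedge(rounds, eta) / T)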
@Article{Duran:2018:AOC,
author = "Santiago Duran and Ina Maria Verloop",
title = "Asymptotic Optimal Control of {Markov}-Modulated
Restless Bandits",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "44--46",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219636",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies optimal control subject to changing
conditions. This is an area that recently received a
lot of attention as it arises in numerous situations in
practice. Some applications being cloud computing
systems with fluctuating arrival rates, or the
time-varying capacity as encountered in power-aware
systems or wireless downlink channels. To study this,
we focus on a restless bandit model, which has proved
to be a powerful stochastic optimization framework to
model scheduling of activities. This paper is a first
step to its optimal control when restless bandits are
subject to changing conditions. We consider the
restless bandit problem in an asymptotic regime, which
is obtained by letting the population of bandits grow
large, and letting the environment change relatively
fast. We present sufficient conditions for a policy to
be asymptotically optimal and show that a set of
priority policies satisfies these. Under an
indexability assumption, an averaged version of
Whittle's index policy is proved to be inside this set
of asymptotically optimal policies. The performance of the
averaged Whittle's index policy is numerically
evaluated for a multi-class scheduling problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Magureanu:2018:OLO,
author = "Stefan Magureanu and Alexandre Proutiere and Marcus
Isaksson and Boxun Zhang",
title = "Online Learning of Optimally Diverse Rankings",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "47--49",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219637",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Search engines answer users' queries by listing
relevant items (e.g. documents, songs, products, web
pages, \ldots{}). These engines rely on algorithms that
learn to rank items so as to present an ordered list
maximizing the probability that it contains a relevant
item. The main challenge in the design of
learning-to-rank algorithms stems from the fact that
queries often have different meanings for different
users. In the absence of any contextual information about
the query, one often has to adhere to the diversity
principle, i.e., to return a list covering the various
possible topics or meanings of the query. To formalize
this learning-to-rank problem, we propose a natural
model where (i) items are categorized into topics, (ii)
users find items relevant only if they match the topic
of their query, and (iii) the engine is not aware of
the topic of an arriving query, nor of the frequency at
which queries related to various topics arrive, nor of
the topic-dependent click-through-rates of the items.
For this problem, we devise LDR (Learning Diverse
Rankings), an algorithm that efficiently learns the
optimal list based on users' feedback only. We show
that after T queries, the regret of LDR scales as
O((N-L)log(T)), where N is the total number of items. This
scaling cannot be improved, i.e., LDR is order
optimal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Talebi:2018:LPF,
author = "Mohammad Sadegh Talebi and Alexandre Proutiere",
title = "Learning Proportionally Fair Allocations with Low
Regret",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "50--52",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219638",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We address the problem of learning Proportionally Fair
(PF) allocations in parallel server systems with
unknown service rates. We provide the first algorithms,
to our knowledge, for learning such allocations with
sub-linear regret.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yun:2018:MAB,
author = "Donggyu Yun and Sumyeong Ahn and Alexandre Proutiere
and Jinwoo Shin and Yung Yi",
title = "Multi-armed Bandit with Additional Observations",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "53--55",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219639",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study multi-armed bandit (MAB) problems with
additional observations, where in each round, the
decision maker selects an arm to play and can also
observe rewards of additional arms (within a given
budget) by paying certain costs. We propose algorithms
that are asymptotically optimal and order-optimal in their
regrets under the settings of stochastic and
adversarial rewards, respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wei:2018:OLW,
author = "Xiaohan Wei and Hao Yu and Michael J. Neely",
title = "Online Learning in Weakly Coupled {Markov} Decision
Processes: a Convergence Time Study",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "56--58",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219640",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider multiple parallel Markov decision
processes (MDPs) coupled by global constraints, where
the time-varying objective and constraint functions can
only be observed after the decision is made. Special
attention is given to how well the decision maker can
perform in T slots, starting from any state, compared
to the best feasible randomized stationary policy in
hindsight. We develop a new distributed online
algorithm where each MDP makes its own decision each
slot after observing a multiplier computed from past
information. While the scenario is significantly more
challenging than the classical online learning context,
the algorithm is shown to have a tight $ O(\sqrt T) $
regret and constraint violations simultaneously. To
obtain such a bound, we combine several new ingredients
including ergodicity and mixing-time bounds in weakly
coupled MDPs, a new regret analysis for online
constrained optimization, a drift analysis for queue
processes, and a perturbation analysis based on Farkas'
Lemma.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zheng:2018:HCL,
author = "Pengfei Zheng and Benjamin C. Lee",
title = "{Hound}: Causal Learning for Datacenter-scale
Straggler Diagnosis",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "59--61",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219641",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stragglers are exceptionally slow tasks within a job
that delay its completion. Stragglers, which are
uncommon within a single job, are pervasive in
datacenters with many jobs. We present Hound, a
statistical machine learning framework that infers the
causes of stragglers from traces of datacenter-scale
jobs. Hound is designed to achieve several objectives:
datacenter-scale diagnosis, unbiased inference,
interpretable models, and computational efficiency. We
demonstrate Hound's capabilities for a production trace
from Google's warehouse-scale datacenters and two Spark
traces from Amazon EC2 clusters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nitu:2018:WSS,
author = "Vlad Nitu and Aram Kocharyan and Hannas Yaya and Alain
Tchana and Daniel Hagimont and Hrachya Astsatryan",
title = "Working Set Size Estimation Techniques in Virtualized
Environments: One Size Does not Fit All",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "62--63",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219642",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
abstract = "Energy consumption is a primary concern for
datacenter management. Numerous datacenters are
relying on virtualization, as it provides flexible
resource management means such as virtual machine (VM)
checkpoint/restart, migration and consolidation.
However, one of the main hindrances to server
consolidation is physical memory. In today's clouds,
memory is generally statically allocated to VMs and
wasted if not used. Techniques (such as ballooning)
were introduced for dynamically reclaiming memory from
VMs, such that only the needed memory is provisioned to
each VM. However, the challenge is to precisely monitor
the needed memory, i.e., the working set of each VM. In
this paper, we thoroughly review the main techniques
that were proposed for monitoring the working set of
VMs. Additionally, we have implemented the main
techniques in the Xen hypervisor and we have defined
different metrics in order to evaluate their
efficiency. Based on the evaluation results, we propose
Badis, a system which combines several of the existing
solutions, using the right solution at the right time.
We also propose a consolidation extension which
leverages Badis in order to pack the VMs based on the
working set size and not the booked memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2018:PSF,
author = "Shenglin Zhang and Ying Liu and Weibin Meng and
Zhiling Luo and Jiahao Bu and Sen Yang and Peixian
Liang and Dan Pei and Jun Xu and Yuzhi Zhang and Yu
Chen and Hui Dong and Xianping Qu and Lei Song",
title = "{PreFix}: Switch Failure Prediction in Datacenter
Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "64--66",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219643",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In modern datacenter networks (DCNs), failures of
network devices are the norm rather than the exception,
and many research efforts have focused on dealing with
failures after they happen. In this paper, we take a
different approach by predicting failures, so that
operators can intervene and ``fix'' the potential
failures before they happen. Specifically, in our
proposed system, named PreFix, we aim to determine
during runtime whether a switch failure will happen in
the near future. The prediction is based on the
measurements of the current switch system status and
historical switch hardware failure cases that have been
carefully labelled by network operators. Our key
observation is that failures of the same switch model
share some common syslog patterns before failures
occur, and we can apply machine learning methods to
extract the common patterns for predicting switch
failures. Our novel set of features (message template
sequence, frequency, seasonality and surge) for machine
learning can efficiently deal with the challenges of
noises, sample imbalance, and computation overhead. We
evaluated PreFix on a data set collected from 9397
switches (3 different switch models) deployed in more
than 20 datacenters owned by a top global search engine
over a 2-year period. PreFix achieved an average of
61.81\% recall and $ 1.84 \times 10^{-5} $ false
positive ratio, outperforming the other failure
prediction methods for computers and ISP devices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Psychas:2018:NPV,
author = "Konstantinos Psychas and Javad Ghaderi",
title = "On Non-Preemptive {VM} Scheduling in the Cloud",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "67--69",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219644",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
abstract = "We study the problem of scheduling VMs (Virtual
Machines) in a distributed server platform, motivated
by cloud computing applications. The VMs arrive
dynamically over time to the system, and require a
certain amount of resources (e.g. memory, CPU, etc) for
the duration of their service. To avoid costly
preemptions, we consider non-preemptive scheduling:
Each VM has to be assigned to a server which has enough
residual capacity to accommodate it, and once a VM is
assigned to a server, its service cannot be disrupted
(preempted). Prior approaches to this problem either
have high complexity, require synchronization among the
servers, or yield queue sizes/delays which are
excessively large. We propose a non-preemptive
scheduling algorithm that resolves these issues. In
general, given an approximation algorithm to Knapsack
with approximation ratio r, our scheduling algorithm
can provide $ r \beta $ fraction of the throughput
region for $ \beta < r $. In the special case of a
greedy approximation algorithm to Knapsack, we further
show that this condition can be relaxed to $ \beta < 1
$. The parameters $ \beta $ and $r$ can be tuned to
provide a tradeoff between achievable throughput,
delay, and computational complexity of the scheduling
algorithm. Finally, extensive simulation results using
both synthetic and real traffic traces are presented to
verify the performance of our algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Islam:2018:WSL,
author = "Mohammad A. Islam and Luting Yang and Kiran Ranganath
and Shaolei Ren",
title = "Why Some Like It Loud: Timing Power Attacks in
Multi-tenant Data Centers Using an Acoustic Side
Channel",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "70--72",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219645",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The common practice of power infrastructure
oversubscription in data centers exposes dangerous
vulnerabilities to well-timed power attacks (i.e.,
maliciously timed power loads), possibly creating
outages and resulting in multimillion-dollar losses. In
this paper, we focus on the emerging threat of power
attacks in a multi-tenant data center, where a
malicious tenant (i.e., attacker) aims at compromising
the data center availability by launching power attacks
and overloading the power capacity. We discover a novel
acoustic side channel resulting from servers' cooling
fan noise, which can help the attacker time power
attacks at the moments when benign tenants' power usage
is high. Concretely, we exploit the acoustic side
channel by: (1) employing a high-pass filter to filter
out the air conditioner's noise; (2) applying
non-negative matrix factorization with sparsity
constraint to demix the received aggregate noise and
detect periods of high power usage by benign tenants;
and (3) designing a state machine to guide power
attacks. We run experiments in a practical data center
environment as well as simulation studies, and
demonstrate that the acoustic side channel can assist
the attacker with detecting more than 50\% of all
attack opportunities, representing state-of-the-art
timing accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ahmadian:2018:ECH,
author = "Saba Ahmadian and Onur Mutlu and Hossein Asadi",
title = "{ECI-Cache}: a High-Endurance and Cost-Efficient {I/O}
Caching Scheme for Virtualized Platforms",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "73--73",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219646",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
abstract = "In recent years, high interest in using Virtual
Machines (VMs) in data centers and cloud computing has
significantly increased the demand for high-performance
data storage systems. A straightforward approach to
providing a high-performance storage system is using
Solid-State Drives (SSDs). Inclusion of SSDs in storage
systems, however, imposes significantly higher cost
compared to Hard Disk Drives (HDDs). Recent studies
suggest using SSDs as a caching layer for HDD-based
storage subsystems in virtualized platforms. Such
studies neglect to address the endurance and cost of
SSDs, which can significantly affect the efficiency of
I/O caching. Moreover, previous studies only configure
the cache size to provide the required performance
level for each VM, while neglecting other important
parameters such as cache write policy and request type,
which can adversely affect both performance-per-cost
and endurance. In this paper, we propose a new
high-Endurance and Cost-efficient I/O caching
(ECI-Cache) scheme for virtualized platforms in
large-scale data centers, which improves both
performance-per-cost and endurance of the SSD cache.
ECI-Cache dynamically assigns (1) an efficient cache
size for each VM, to maximize the overall performance
of the running VMs and (2) an effective write policy
for each VM, to enhance the endurance and
performance-per-cost of the storage subsystem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2018:SMV,
author = "Zhaowei Tan and Yuanjie Li and Qianru Li and Zhehui
Zhang and Zhehan Li and Songwu Lu",
title = "Supporting Mobile {VR} in {LTE} Networks: How Close
Are We?",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "74--74",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219647",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, we have witnessed a boom in virtual
reality (VR). 21 million wearable VR headsets are
projected to be shipped in 2017, resulting in \$4.9
billion revenue [3]. Among all the options, the mobile
VR empowered by phones is most popular, contributing
98\% of the sales [1]. Despite being at an early stage, it
appeals to the general public with low cost ($ \approx
$ \$100) and excellent convenience (no wiring). Mobile
VR aims to offer users ubiquitous and high-fidelity
experiences. If achieved, users can access VR
``anytime, anywhere'', regardless of whether they roam
or remain static. They also receive smooth,
high-resolution panorama views throughout the VR
experience. It thus demands high bandwidth and
stringent end-to-end latency to synchronize the
graphical displays with the user motions. A promising
approach to enabling ubiquitous mobile VR is the
edge-based scheme over 4G LTE networks. As shown in
Figure 1, the VR headset reports sensory user motions
to edge servers through the LTE network. The edge
servers accept user input and deliver the requested
graphics. They thus offload computation-intensive
processing tasks from the battery-powered user devices.
Ubiquitous access is provided by the LTE network, the
only large-scale wireless infrastructure offering
universal coverage and seamless mobility. In this work
[2], we examine several common perceptions, and study
medium-quality mobile VR (60 frames per second and
1080p resolution) over operational LTE networks. We
show that, contrary to common understanding, bandwidth
tends not to be the main bottleneck for medium-quality
VR. Instead, network latency poses the biggest obstacle
for mobile VR. A large portion of network latency
does not stem from wireless data transfer, but from LTE
signaling operations to facilitate wireless data
delivery. These operations exhibit two categories of
latency deficiency: (1) Inter-protocol incoordination,
in which problematic interplays between protocols
unnecessarily incur delays; (2) Single-protocol
overhead, in which each protocol's signaling actions
unavoidably incur delays. Our analysis, together with
8-month empirical studies over 4 US mobile carriers,
looks into five common beliefs on LTE network latency
under both static and mobile scenarios and shows that
they are incorrect. In fact, they pose roadblocks to
enabling mobile VR. Our three findings are centered on
three existing mechanisms for data-plane signaling,
which are all well known in the literature. However,
their deficiencies have not been studied from the
latency perspective, particularly for delay-sensitive
mobile VR applications. We further describe a new
finding that incurs long latency but has not been
reported in the literature. Moreover, we quantify the
impact of each finding under VR traffic. We devise
LTE-VR, a device-side solution to mobile VR without
changing hardware or infrastructure. It adapts the
signaling operations, while being standard compliant.
It reactively mitigates unnecessary latency among
protocols and proactively masks unavoidable latency
inside each protocol. It exploits two ideas. First, it
applies cross-layer design to ensure fast loss
detection and recovery and minimize duplicates during
handover. Second, it leverages rich side-channel info
only available at the device to reduce the latency. We
have prototyped LTE-VR with USRP and OpenAirInterface.
Our evaluation shows that LTE-VR reduces the frequency
of frames that miss the human tolerance by $ 3.7 \times
$ on average. It meets the delay tolerance with 95\%
probability, which approaches the lower bound. It
also achieves latency reduction comparable to $ 10
\times $ wireless bandwidth expansion. Furthermore,
LTE-VR incurs marginal signaling overhead (5\% more
messages) and extra resource (0.1\% more bandwidth and
2.3\% more radio grants). We further note that our
findings would carry over to the upcoming 5G. LTE-VR is
likewise applicable to 5G scenarios. It complements the
proposed 5G radio, while providing hints for 5G
signaling design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pignolet:2018:TNP,
author = "Yvonne-Anne Pignolet and Stefan Schmid and Gilles
Tredan",
title = "Tomographic Node Placement Strategies and the Impact
of the Routing Model",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "75--77",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219648",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Tomographic techniques can be used for the fast
detection of link failures at low cost. Our paper
studies the impact of the routing model on tomographic
node placement costs. We present a taxonomy of path
routing models and provide optimal and near-optimal
algorithms to deploy a minimal number of asymmetric and
symmetric tomography nodes for basic network topologies
under different routing model classes. Intriguingly, we
find that in many cases routing according to a more
restrictive routing model gives better results:
compared to a more general routing model, computing a
good placement is algorithmically more tractable and
does not entail high monitoring costs, a desirable
trade-off in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vlachou:2018:LTL,
author = "Christina Vlachou and Ioannis Pefkianakis and Kyu-Han
Kim",
title = "{LTERadar}: Towards {LTE}-Aware {Wi-Fi} Access
Points",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "78--80",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219649",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Major LTE hardware vendors (e.g. Qualcomm, Ericsson),
mobile service providers (e.g. Verizon, T-Mobile), and
standardization bodies (e.g. LTE-U forum, 3GPP) are
extending LTE networks into unlicensed spectrum bands
to boost the speeds and coverage of mobile networks.
However, the deployment of LTE in unlicensed bands has
raised serious concerns regarding its adverse impact on
Wi-Fi networks in the same bands. We design LTERadar, a
lightweight interference detector, that runs on Wi-Fi
devices and accurately detects LTE interference in real
time. LTERadar is a purely software-based solution that
is independent of specific hardware or technology of
the LTE interferer (e.g. LTE-U, LAA, the dominant LTE
unlicensed protocols). Our implementation and
evaluation with off-the-shelf Wi-Fi APs show that
LTERadar achieves more than 90\% interference
detection accuracy in operational networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kuhnle:2018:NRL,
author = "Alan Kuhnle and Victoria G. Crawford and My T. Thai",
title = "Network Resilience and the Length-Bounded Multicut
Problem: Reaching the Dynamic Billion-Scale with
Guarantees",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "81--83",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219650",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by networked systems in which the
functionality of the network depends on vertices in the
network being within a bounded distance T of each
other, we study the length-bounded multicut problem:
given a set of pairs, find a minimum-size set of edges
whose removal ensures the distance between each pair
exceeds T. We introduce the first algorithms for this
problem capable of scaling to massive networks with
billions of edges and nodes: three highly scalable
algorithms with worst-case performance ratios.
Furthermore, one of our algorithms is fully dynamic,
capable of updating its solution upon incremental
vertex / edge additions or removals from the network
while maintaining its performance ratio. Finally, we
show that unless NP $ \subseteq $ BPP, there is no
polynomial-time approximation algorithm with
performance ratio better than $ \Omega (T) $, which matches
the ratio of our dynamic algorithm up to a constant
factor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2018:PIA,
author = "Sen Yang and He Yan and Zihui Ge and Dongmei Wang and
Jun Xu",
title = "Predictive Impact Analysis for Designing a Resilient
Cellular Backhaul Network",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "84--86",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219651",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Backhaul transport network design and optimization for
cellular service providers involve a unique challenge
stemming from the fact that an end-user's equipment
(UE) is within the radio reach of multiple cellular
towers: It is hard to evaluate the impact of the
failure of the UE's primary serving tower on the UE,
because the UE may simply switch to get service from
other nearby cellular towers. To overcome this
challenge, one needs to quantify the cellular service
redundancy among the cellular towers riding on that
transport circuit and their nearby cellular towers,
which in turn requires a comprehensive understanding of
the radio signal profile in the area of the impacted
towers, the spatial distribution of UEs therein, and
their expected workload (e.g., calls, data throughput).
In this work, we develop a novel methodology for
assessing the service impact of any hypothetical
cellular tower outage scenario, and implement it in an
operational system named Tower Outage Impact Predictor
(TOIP). Our evaluations, using both synthetic data and
historical real tower outages in a large operational
cellular network, show conclusively that TOIP gives an
accurate assessment of various tower outage scenarios,
and can provide critical input data towards designing a
reliable cellular backhaul transport network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Subramanian:2018:SFT,
author = "Kausik Subramanian and Loris D'Antoni and Aditya
Akella",
title = "Synthesis of Fault-Tolerant Distributed Router
Configurations",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "87--89",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219652",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Operators of modern networks require support for
diverse and complex end-to-end policies, such as
middlebox traversals, isolation, and traffic
engineering. While Software-defined Networking (SDN)
provides centralized custom routing functionality in
networks to realize these policies, many networks still
deploy ``legacy'' control planes running distributed
routing protocols like OSPF and BGP because these
protocols are scalable and robust to failures. However,
realization of policies by distributed control plane
configurations is manual and error-prone. We present
Zeppelin, a system for automatically generating
policy-compliant control planes that also behave well
under the majority of small network failures. Zeppelin
differs from existing approaches in that it uses
policy-compliant paths to guide the synthesis process
instead of directly generating policy-compliant
configurations. We show that Zeppelin synthesizes
highly resilient and policy-compliant configurations
for real topologies with up to 80 routers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2018:RFM,
author = "Kuang Xu and Se-Young Yun",
title = "Reinforcement with Fading Memories",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "90--92",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219653",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the effect of imperfect memory on decision
making in the context of a stochastic sequential
action-reward problem. An agent chooses a sequence of
actions which generate discrete rewards at different
rates. She is allowed to make new choices at rate $
\beta $, while past rewards disappear from her memory
at rate $ \mu $. We focus on a family of decision rules
where the agent makes a new choice by randomly
selecting an action with a probability approximately
proportional to the amount of past rewards associated
with each action in her memory. We provide closed-form
formulae for the agent's steady-state choice
distribution in the regime where the memory span is
large $ (\mu \to 0) $, and show that the agent's
success critically depends on how quickly she updates
her choices relative to the speed of memory decay. If $
\beta \gg \mu $, the agent almost always chooses the
best action, i.e., the one with the highest reward
rate. Conversely, if $ \beta \ll \mu $, the agent
chooses an action with a probability roughly
proportional to its reward rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Doan:2018:CRD,
author = "Thinh T. Doan and Carolyn L. Beck and R. Srikant",
title = "On the Convergence Rate of Distributed Gradient
Methods for Finite-Sum Optimization under Communication
Delays",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "93--95",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219654",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by applications in machine learning and
statistics, we study distributed optimization problems
over a network of processors, where the goal is to
optimize a global objective composed of a sum of local
functions. In these problems, due to the large scale of
the data sets, the data and computation must be
distributed over multiple processors resulting in the
need for distributed algorithms. In this paper, we
consider a popular distributed gradient-based consensus
algorithm, which only requires local computation and
communication. An important problem in this area is to
analyze the convergence rate of such algorithms in the
presence of communication delays that are inevitable in
distributed systems. We prove the convergence of the
gradient-based consensus algorithm in the presence of
uniform, but possibly arbitrarily large, communication
delays between the processors. Moreover, we obtain an
upper bound on the rate of convergence of the algorithm
as a function of the network size, topology, and the
inter-processor communication delays.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2018:DSM,
author = "Yudong Chen and Lili Su and Jiaming Xu",
title = "Distributed Statistical Machine Learning in
Adversarial Settings: {Byzantine} Gradient Descent",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "96--96",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219655",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the distributed statistical learning
problem over decentralized systems that are prone to
adversarial attacks. This setup arises in many
practical applications, including Google's Federated
Learning. Formally, we focus on a decentralized system
that consists of a parameter server and m working
machines; each working machine keeps N/m data samples,
where N is the total number of samples. In each
iteration, up to q of the m working machines suffer
Byzantine faults --- a faulty machine in the given
iteration behaves arbitrarily badly against the system
and has complete knowledge of the system. Additionally,
the sets of faulty machines may be different across
iterations. Our goal is to design robust algorithms
such that the system can learn the underlying true
parameter, which is of dimension d, despite the
interruption of the Byzantine attacks. In this paper,
based on the geometric median of means of the
gradients, we propose a simple variant of the classical
gradient descent method. We show that our method can
tolerate q Byzantine failures up to $ 2 (1 + \epsilon)q
\leq m $ for some constant $ \epsilon > 0 $. The
parameter estimate converges in $ O(\log N) $ rounds
with an estimation error on the order of $ \max \{
\sqrt {d q / N}, \sqrt {d / N} \} $, which is larger
than the minimax-optimal error rate $ \sqrt {d / N} $
in the centralized and failure-free setting by at most
a factor of $ \sqrt q $. The total computational
complexity of our algorithm is of $ O((N d / m) \log N)
$ at each working machine and $ O(m d + k d \log^3 N)
$ at the central server, and the total
communication cost is of $ O(m d \log N) $. We further
provide an application of our general results to the
linear regression problem. A key challenge arising in
the above problem is that Byzantine failures create
arbitrary and unspecified dependency among the
iterations and the aggregated gradients. To handle this
issue in the analysis, we prove that the aggregated
gradient, as a function of model parameter, converges
uniformly to the true gradient function.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2018:NNM,
author = "Mowei Wang and Yong Cui and Shihan Xiao and Xin Wang
and Dan Yang and Kai Chen and Jun Zhu",
title = "Neural Network Meets {DCN}: Traffic-driven Topology
Adaptation with Deep Learning",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "97--99",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219656",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The emerging optical/wireless topology reconfiguration
technologies have shown great potential in improving
the performance of data center networks. However, it
also poses a big challenge on how to find the best
topology configurations to support the dynamic traffic
demands. In this work, we present xWeaver, a
traffic-driven deep learning solution to infer the
high-performance network topology online. xWeaver
supports a powerful network model that enables the
topology optimization over different performance
metrics and network architectures. With the design of
properly-structured neural networks, it can
automatically derive the critical traffic patterns from
data traces and learn the underlying mapping between
the traffic patterns and topology configurations
specific to the target data center. After offline
training, xWeaver generates the optimized (or
near-optimal) topology configuration online, and can
also smoothly update its model parameters for new
traffic patterns. The experimental results show the
significant performance gain of xWeaver in achieving
smaller flow completion times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schardl:2018:CFC,
author = "Tao B. Schardl and Tyler Denniston and Damon Doucet
and Bradley C. Kuszmaul and I-Ting Angelina Lee and
Charles E. Leiserson",
title = "The {CSI} Framework for Compiler-Inserted Program
Instrumentation",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "100--102",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219657",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The CSI framework provides comprehensive static
instrumentation that a compiler can insert into a
program-under-test so that dynamic-analysis tools ---
memory checkers, race detectors, cache simulators,
performance profilers, code-coverage analyzers, etc.
--- can observe and investigate runtime behavior.
Heretofore, tools based on compiler instrumentation
would each separately modify the compiler to insert
their own instrumentation. In contrast, CSI inserts a
standard collection of instrumentation hooks into the
program-under-test. Each CSI-tool is implemented as a
library that defines relevant hooks, and the remaining
hooks are ``nulled'' out and elided during either
compile-time or link-time optimization, resulting in
instrumented runtimes on par with custom
instrumentation. CSI allows many compiler-based tools
to be written as simple libraries without modifying the
compiler, lowering the bar for the development of
dynamic-analysis tools. We have defined a standard API
for CSI and modified LLVM to insert CSI hooks into the
compiler's internal representation (IR) of the program.
The API organizes IR objects --- such as functions,
basic blocks, and memory accesses --- into flat and
compact ID spaces, which not only simplifies the
building of tools, but surprisingly enables faster
maintenance of IR-object data than do traditional hash
tables. CSI hooks contain a ``property'' parameter that
allows tools to customize behavior based on static
information without introducing overhead. CSI provides
``forensic'' tables that tools can use to associate IR
objects with source-code locations and to relate IR
objects to each other. To evaluate the efficacy of CSI,
we implemented six demonstration CSI-tools. One of our
studies shows that compiling with CSI and linking with
the ``null'' CSI-tool produces a tool-instrumented
executable that is as fast as the original
uninstrumented code. Another study, using a CSI port of
Google's ThreadSanitizer, shows that the CSI-tool
rivals the performance of Google's custom
compiler-based implementation. All other demonstration
CSI tools slow down the execution of the
program-under-test by less than 70\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jain:2018:QEC,
author = "Akshay Jain and Mahmoud Khairy and Timothy G. Rogers",
title = "A Quantitative Evaluation of Contemporary {GPU}
Simulation Methodology",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "103--105",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219658",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Contemporary Graphics Processing Units (GPUs) are used
to accelerate highly parallel compute workloads. For
the last decade, researchers in academia and industry
have used cycle-level GPU architecture simulators to
evaluate future designs. This paper performs an
in-depth analysis of commonly accepted GPU simulation
methodology, examining the effect both the workload and
the choice of instruction set architecture have on the
accuracy of a widely-used simulation infrastructure,
GPGPU-Sim. We analyze numerous aspects of the
architecture, validating the simulation results against
real hardware. Based on a characterized set of over
1700 GPU kernels, we demonstrate that while the
relative accuracy of compute-intensive workloads is
high, inaccuracies in modeling the memory system result
in much higher error when memory performance is
critical. We then perform a case study using a recently
proposed GPU architecture modification, demonstrating
that the cross-product of workload characteristics and
instruction set architecture choice can have an effect
on the predicted efficacy of the technique.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luo:2018:INF,
author = "Yixin Luo and Saugata Ghose and Yu Cai and Erich F.
Haratsch and Onur Mutlu",
title = "Improving {$3$D} {NAND} Flash Memory Lifetime by
Tolerating Early Retention Loss and Process Variation",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "106--106",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219659",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Compared to planar NAND flash memory, 3D NAND flash
memory uses a new flash cell design, and vertically
stacks dozens of silicon layers in a single chip. This
allows 3D NAND flash memory to increase storage density
using a much less aggressive manufacturing process
technology than planar NAND. The circuit-level and
structural changes in 3D NAND flash memory
significantly alter how different error sources affect
the reliability of the memory. Our goal is to (1)
identify and understand these new error characteristics
of 3D NAND flash memory, and (2) develop new techniques
to mitigate prevailing 3D NAND flash errors. In this
paper, we perform a rigorous experimental
characterization of real, state-of-the-art 3D NAND
flash memory chips, and identify three new error
characteristics that were not previously observed in
planar NAND flash memory, but are fundamental to the
new architecture of 3D NAND flash memory.
(1) 3D NAND flash
memory exhibits layer-to-layer process variation, a new
phenomenon specific to the 3D nature of the device,
where the average error rate of each 3D-stacked layer
in a chip is significantly different. We are the first
to provide detailed experimental characterization
results of layer-to-layer process variation in real
flash devices in open literature. Our results show that
the raw bit error rate in the middle layer can be 6$
\times $ the error rate in the top layer. (2) 3D NAND
flash memory experiences early retention loss, a
new phenomenon where the number of errors due to charge
leakage increases quickly within several hours after
programming, but then increases at a much slower rate.
We are the first to perform an extended-duration
observation of early retention loss over the course of
24 days. Our results show that the retention error rate
in a 3D NAND flash memory block quickly increases by an
order of magnitude within $ \sim $3 hours after
programming. (3) 3D NAND flash memory experiences
retention interference, a new phenomenon where the rate
at which charge leaks from a flash cell is dependent on
the amount of charge stored in neighboring flash cells.
Our results show that charge leaks at a lower rate
(i.e., the retention loss speed is slower) when the
neighboring cell is in a state that holds more charge
(i.e., a higher-voltage state). Our
experimental observations indicate that we must revisit
the error models and error mitigation mechanisms
devised for planar NAND flash, as they are no longer
accurate for 3D NAND flash behavior. To this end, we
develop new analytical models of (1) the
layer-to-layer process variation in 3D NAND flash
memory, and (2) retention loss in 3D NAND flash memory.
Our models estimate the raw bit error rate (RBER),
threshold voltage distribution, and the optimal
read reference voltage (i.e., the voltage at which RBER
is minimized when applied during a read operation) for
each flash page. Both models are useful for developing
techniques to mitigate raw bit errors in 3D NAND flash
memory. Motivated by our new findings and models, we
develop four new techniques to mitigate process
variation and early retention loss in 3D NAND flash
memory. Our first technique, LaVAR, reduces process
variation by fine-tuning the read reference voltage
independently for each layer. Our second technique,
LI-RAID, improves reliability by changing how pages are
grouped under the RAID (Redundant Array of Independent
Disks) error recovery technique, using information
about layer-to-layer process variation to reduce the
likelihood that the RAID recovery of a group could fail
significantly earlier during the flash lifetime than
recovery of other groups. Our third technique, ReMAR,
reduces retention errors in 3D NAND flash memory by
tracking the retention age of the data using our
retention model and adapting the read reference voltage
to data age. Our fourth technique, ReNAC, adapts the
read reference voltage to the amount of retention
interference to re-read the data after a read operation
fails. These four techniques are complementary, and can
be combined together to significantly improve flash
memory reliability. Compared to a state-of-the-art
baseline, our techniques, when combined, improve flash
memory lifetime by 1.85$ \times $. Alternatively, if a
NAND flash manufacturer wants to keep the lifetime of
the 3D NAND flash memory device constant, our
techniques reduce the storage overhead required to hold
error correction information by 78.9\%. For more
information on our new experimental characterization of
modern 3D NAND flash memory chips and our proposed
models and techniques, please refer to the full version
of our paper [Luo et al., POMACS, 2018].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2018:FGE,
author = "Xiaomeng Chen and Jiayi Meng and Y. Charlie Hu and
Maruti Gupta and Ralph Hasholzner and Venkatesan
Nallampatti Ekambaram and Ashish Singh and
Srikathyayani Srikanteswara",
title = "A Fine-grained Event-based Modem Power Model for
Enabling In-depth Modem Energy Drain Analysis",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "107--109",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219660",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cellular modems enable ubiquitous Internet
connectivities to modern smartphones, but in doing so
they become a major contributor to the smartphone
energy drain. Understanding modem energy drain requires
a detailed power model. The prior art, an RRC-state
based power model, was developed primarily to model the
modem energy drain of application data transfer. As
such, it serves its original purpose well, but is
insufficient for studying detailed modem behavior, e.g.,
activities in the control plane. In [2], we propose a
new methodology of modeling modem power draw behavior
at event granularity, and develop, to our knowledge,
the first fine-grained modem power model that captures
the power draw of all LTE modem radio-on events in
different RRC modes. Second, we quantitatively
demonstrate the advantages of the new model over the
state-based power model under a wide variety of contexts
via controlled experiments. Finally, using our
fine-grained modem power model, we perform the first
detailed modem energy drain in-the-wild study involving
12 Nexus 6 phones under normal usage by 12 volunteers
spanning a total of 348 days. Our study provides the
first quantitative analysis of energy drain due to
modem control activities in the wild and exposes their
correlation with context such as location and user
mobility. In this abstract, we introduce the essence
of the methodology and highlight results from the
in-the-wild study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghose:2018:WYD,
author = "Saugata Ghose and Abdullah Giray Yaglik{\c{c}}i and
Raghav Gupta and Donghyuk Lee and Kais Kudrolli and
William X. Liu and Hasan Hassan and Kevin K. Chang and
Niladrish Chatterjee and Aditya Agrawal and Mike
O'Connor and Onur Mutlu",
title = "What Your {DRAM} Power Models Are Not Telling You:
Lessons from a Detailed Experimental Study",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "110--110",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219661",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Main memory (DRAM) consumes as much as half of the
total system power in a computer today, due to the
increasing demand for memory capacity and bandwidth.
There is a growing need to understand and analyze DRAM
power consumption, which can be used to research new
DRAM architectures and systems that consume less power.
A major obstacle against such research is the lack of
detailed and accurate information on the power
consumption behavior of modern DRAM devices.
Researchers have long relied on DRAM power models that
are predominantly based on a set of standardized
current measurements provided by DRAM vendors, called
IDD values. Unfortunately, we find that
state-of-the-art DRAM power models are often highly
inaccurate when compared with the real power consumed
by DRAM. This is because existing DRAM power models (1)
are based on the worst-case power consumption of
devices, as vendor specifications list the current
consumed by the most power-hungry device sold; (2) do
not capture variations in DRAM power consumption due to
different data value patterns; and (3) do not account
for any variation across different devices or within a
device.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Oleksenko:2018:IME,
author = "Oleksii Oleksenko and Dmitrii Kuvaiskii and Pramod
Bhatotia and Pascal Felber and Christof Fetzer",
title = "{Intel MPX} Explained: a Cross-layer Analysis of the
{Intel MPX} System Stack",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "111--112",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219662",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Memory-safety violations are the primary cause of
security and reliability issues in software systems
written in unsafe languages. Given the limited adoption
of decades-long research in software-based memory
safety approaches, as an alternative, Intel released
Memory Protection Extensions (MPX)---a
hardware-assisted technique to achieve memory safety.
In this work, we perform an exhaustive study of Intel
MPX architecture along three dimensions: (a)
performance overheads, (b) security guarantees, and (c)
usability issues. We present the first detailed root
cause analysis of problems in the Intel MPX
architecture through a cross-layer dissection of the
entire system stack, involving the hardware, operating
system, compilers, and applications. To put our
findings into perspective, we also present an in-depth
comparison of Intel MPX with three prominent types of
software-based memory safety approaches. Lastly, based
on our investigation, we propose directions for
potential changes to the Intel MPX architecture to aid
the design space exploration of future hardware
extensions for memory safety. A complete version of
this work appears in the 2018 proceedings of the ACM on
Measurement and Analysis of Computing Systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2018:RMFa,
author = "Nicolas Gast and Benny {Van Houdt}",
title = "A Refined Mean Field Approximation",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "113--113",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219663",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic models have been used to assess the
performance of computer (and other) systems for many
decades. As a direct analysis of large and complex
stochastic models is often prohibitive, approximations
methods to study their behavior have been devised. One
very popular approximation method relies on mean field
theory. Its widespread use can be explained by the
relative ease of defining and solving a mean field
model in combination with its high accuracy for large
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellemans:2018:PDC,
author = "Tim Hellemans and Benny {Van Houdt}",
title = "On the Power-of-d-choices with Least Loaded Server
Selection",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "114--114",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219664",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by distributed schedulers that combine the
power-of-d-choices with late binding and systems that
use replication with cancellation-on-start, we study
the performance of the LL(d) policy which assigns a job
to a server that currently has the least workload among
d randomly selected servers in large-scale homogeneous
clusters. We consider general job size distributions
and propose a partial integro-differential equation to
describe the evolution of the system. This equation
relies on the earlier proven ansatz for LL(d) which
asserts that the workload distributions of any finite
set of queues become independent of one another as the
number of servers tends to infinity. Based on this
equation we propose a fixed point iteration for the
limiting workload distribution and study its
convergence.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
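For readers who want a concrete picture of the LL(d) rule summarized in the
abstract above, the following Python sketch is our own illustration (server
count, arrival rate, and exponential(1) job sizes are assumptions made only
for the example, not the authors' model or code):

    import random

    def ll_d_assign(workloads, d, job_size, rng=random):
        """LL(d): sample d servers uniformly at random and add the job's work
        to the one with the least current workload; return its index."""
        candidates = rng.sample(range(len(workloads)), d)
        target = min(candidates, key=lambda i: workloads[i])
        workloads[target] += job_size
        return target

    def simulate_mean_workload(n_servers=100, d=2, arrival_rate=0.9,
                               horizon=10_000.0, rng=random):
        """Toy simulation: Poisson arrivals at total rate arrival_rate * n_servers,
        exponential(1) job sizes, workloads draining at unit rate between arrivals."""
        workloads = [0.0] * n_servers
        t = 0.0
        while t < horizon:
            dt = rng.expovariate(arrival_rate * n_servers)
            t += dt
            workloads = [max(0.0, w - dt) for w in workloads]
            ll_d_assign(workloads, d, rng.expovariate(1.0), rng)
        return sum(workloads) / n_servers

Calling simulate_mean_workload() gives a rough empirical average workload per
server, which can be compared against the fixed point iteration studied in the paper.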
@Article{Zhou:2018:DQI,
author = "Xingyu Zhou and Fei Wu and Jian Tan and Kannan
Srinivasan and Ness Shroff",
title = "Degree of Queue Imbalance: Overcoming the Limitation
of Heavy-traffic Delay Optimality in Load Balancing
Systems",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "115--115",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219665",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we argue that heavy-traffic delay
optimality is a coarse metric that does not necessarily
imply good delay performance. Specifically, we show
that any load balancing scheme is heavy-traffic delay
optimal as long as it satisfies a fairly weak
condition. This condition only requires that, in the
long term, the dispatcher favors, even slightly, shorter
queues over longer queues. Hence, the empirical delay
performance of heavy-traffic delay optimal schemes can
range from very good (that of join-shortest-queue) to
very bad (arbitrarily close to the performance of
random routing). To overcome this limitation, we
introduce a new metric called degree of queue
imbalance, which measures the queue length difference
between all the servers in steady-state. Given a
heavy-traffic delay optimal load balancing scheme, we
can characterize the resultant degree of queue
imbalance. This, in turn, allows us to explicitly
differentiate between good and poor load balancing
schemes. Thus, this paper implies that good load
balancing should not only be designed to be
heavy-traffic delay optimal, but also have a low degree
of queue imbalance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berg:2018:TOP,
author = "Benjamin Berg and Jan-Pieter Dorsman and Mor
Harchol-Balter",
title = "Towards Optimality in Parallel Job Scheduling",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "116--118",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219666",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To keep pace with Moore's law, chip designers have
focused on increasing the number of cores per chip. To
effectively leverage these multi-core chips, one must
decide how many cores to assign to each job. Given that
jobs receive sublinear speedups from additional cores,
there is a tradeoff: allocating more cores to an
individual job reduces the job's runtime, but decreases
the efficiency of the overall system. We ask how the
system should assign cores to jobs so as to minimize
the mean response time over a stream of incoming jobs.
To answer this question, we develop an analytical model
of jobs running on a multi-core machine. We prove that
EQUI, a policy which continuously divides cores evenly
across jobs, is optimal when all jobs follow a single
speedup curve and have exponentially distributed sizes.
We also consider a class of ``fixed-width'' policies,
which choose a single level of parallelization, k, to
use for all jobs. We prove that, surprisingly,
fixed-width policies which use the optimal fixed level
of parallelization, k*, become near-optimal as the
number of cores becomes large. In the case where jobs
may follow different speedup curves, finding a good
scheduling policy is even more challenging. In
particular, EQUI is no longer optimal, but a very
simple policy, GREEDY*, performs well empirically.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
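As a side note, the two allocation rules named in this abstract are easy to
state in code. The sketch below is our own illustration (the function names
and the integer-core simplification are ours, not the authors'):

    def equi_allocation(n_cores, n_jobs):
        """EQUI: continuously divide the cores evenly across all jobs in the system."""
        return [n_cores / n_jobs] * n_jobs if n_jobs else []

    def fixed_width_allocation(n_cores, n_jobs, k):
        """Fixed-width policy with level k: each job in service gets exactly k
        cores, so at most n_cores // k jobs run at a time; the rest wait."""
        in_service = min(n_jobs, n_cores // k)
        return [k] * in_service + [0] * (n_jobs - in_service)

For example, equi_allocation(64, 5) gives every job 12.8 cores, while
fixed_width_allocation(64, 5, 16) runs 4 jobs on 16 cores each and queues the fifth.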
@Article{Jiang:2018:CSM,
author = "Bo Jiang and Philippe Nain and Don Towsley and Saikat
Guha",
title = "On a Class of Stochastic Multilayer Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "119--121",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219667",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we introduce a new class of stochastic
multilayer networks. A stochastic multilayer network is
the aggregation of M networks (one per layer) where
each is a subgraph of a foundational network G. Each
layer network is the result of probabilistically
removing links and nodes from G. The resulting network
includes any link that appears in at least K layers.
This model is an instance of a non-standard site-bond
percolation model. Two sets of results are obtained:
first, we derive the probability distribution that the
M-layer network is in a given configuration for some
particular graph structures (explicit results are
provided for a line and an algorithm is provided for a
tree), where a configuration is the collective state of
all links (each either active or inactive). Next, we
show that for appropriate scalings of the node and link
selection processes in a layer, links are
asymptotically independent as the number of layers goes
to infinity, and follow Poisson distributions.
Numerical results are provided to highlight the impact
of having several layers on some metrics of interest
(including expected size of the cluster a node belongs
to in the case of the line). This model finds
applications in wireless communication networks with
multichannel radios, multiple social networks with
overlapping memberships, transportation networks, and,
more generally, in any scenario where a common set of
nodes can be linked via co-existing means of
connectivity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
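To make the aggregation rule concrete ("any link that appears in at least K
layers"), here is a small Python sketch of our own; the edge representation
and function name are assumptions made for illustration only:

    from collections import Counter

    def aggregate_multilayer(layers, k):
        """Return the set of undirected edges appearing in at least k of the
        given layers; each layer is an iterable of (u, v) edges."""
        counts = Counter()
        for layer in layers:
            counts.update({tuple(sorted(edge)) for edge in layer})
        return {edge for edge, c in counts.items() if c >= k}

For instance, aggregate_multilayer([{(1, 2), (2, 3)}, {(2, 1)}], 2) returns
{(1, 2)}: only the link present in both layers survives the aggregation.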
@Article{Zeng:2018:FJQ,
author = "Yun Zeng and Jian Tan and Cathy Honghui Xia",
title = "Fork and Join Queueing Networks with Heavy Tails:
Scaling Dimension and Throughput Limit",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "122--124",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219668",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel and distributed computing systems are
foundational to the success of cloud computing and big
data analytics. Fork-Join Queueing Networks with
Blocking (FJQN/Bs) are natural models for such systems.
While engineering solutions have long been made to
build and scale such systems, it is challenging to
rigorously characterize the throughput performance of
ever-growing systems, especially in the presence of
heavy-tailed delays. In this paper, we utilize an
infinite sequence of FJQN/Bs to study the throughput
limit and focus on regularly varying service times with
index $ \alpha > 1 $. We introduce two novel geometric
concepts --- scaling dimension and extended metric
dimension --- and show that an infinite sequence of
FJQN/Bs is throughput scalable if the extended metric
dimension $ < \alpha - 1 $ and only if the scaling
dimension $ \le \alpha - 1 $. These results provide new
insights on the scalability of a rich class of
FJQN/Bs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bonald:2018:PBF,
author = "Thomas Bonald and C{\'e}line Comte and Fabien
Mathieu",
title = "Performance of Balanced Fairness in Resource Pools: a
Recursive Approach",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "125--127",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219669",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Understanding the performance of a pool of servers is
crucial for proper dimensioning. One of the main
challenges is to take into account the complex
interactions between servers that are pooled to process
jobs. In particular, a job generally cannot be
processed by just any server of the cluster, due to various
constraints like data locality. In this paper, we
represent these constraints by some assignment graph
between jobs and servers. We present a recursive
approach to computing performance metrics like mean
response times when the server capacities are shared
according to balanced fairness. While the computational
cost of these formulas can be exponential in the number
of servers in the worst case, we illustrate their
practical interest by introducing broad classes of pool
structures that can be exactly analyzed in polynomial
time. This considerably extends the class of models for
which explicit performance metrics are accessible.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2018:DLC,
author = "Xingyu Zhou and Fei Wu and Jian Tan and Yin Sun and
Ness Shroff",
title = "Designing Low-Complexity Heavy-Traffic Delay-Optimal
Load Balancing Schemes: Theory to Algorithms",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "128--128",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219670",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We establish a unified analytical framework for
designing load balancing algorithms that can
simultaneously achieve low latency, low complexity, and
low communication overhead. We first propose a general
class $\Pi$ of load balancing policies and prove that
they are both throughput optimal and heavy-traffic
delay optimal. This class $\Pi$ includes popular
policies such as join-shortest-queue (JSQ) and
power-of-$d$ as special cases, but not the recently
proposed join-idle-queue (JIQ) policy. In fact, we show
that JIQ is not heavy-traffic delay optimal even for
homogeneous servers. By exploiting the flexibility
offered by the class $\Pi$, we design a new load
balancing policy called join-below-threshold (JBT-d),
in which arriving jobs are preferentially assigned
to queues whose lengths are no greater than a threshold, and the
threshold is updated infrequently. JBT-d has several
benefits: (i) JBT-d belongs to the class $\Pi$ and
hence is throughput optimal and heavy-traffic delay
optimal. (ii) JBT-d has zero dispatching delay, like
JIQ and other pull-based policies, and low message
overhead due to infrequent threshold updates. (iii)
Extensive simulations show that JBT-d has good delay
performance, comparable to the JSQ policy in various
system settings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
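For intuition, the JBT-d dispatching rule as we read it from the abstract can
be sketched as follows; this is our own simplification (the fallback rule and
the handling of threshold updates are assumptions, not the paper's exact algorithm):

    import random

    def jbt_dispatch(queues, threshold, d=2, rng=random):
        """Join-below-threshold: send the arriving job to a queue whose length
        is at or below the threshold if one exists; otherwise fall back to the
        shorter of d randomly sampled queues. How and how often the threshold
        itself is updated is omitted here."""
        below = [i for i, q in enumerate(queues) if q <= threshold]
        if below:
            return rng.choice(below)
        candidates = rng.sample(range(len(queues)), d)
        return min(candidates, key=lambda i: queues[i])

Because the threshold changes only infrequently, the dispatcher needs far fewer
messages per job than policies that query queue lengths on every arrival.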
@Article{Wang:2018:TFC,
author = "Sinong Wang and Ness Shroff",
title = "Towards Fast-Convergence, Low-Delay and Low-Complexity
Network Optimization",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "129--131",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219671",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed network optimization has been studied
for several years. However, we still do not have a good
idea of how to design schemes that can simultaneously
provide good performance across the dimensions of
utility optimality, convergence speed, and delay. To
address these challenges, in this paper, we propose a
new algorithmic framework with all these metrics
approaching optimality. The salient features of our new
algorithm are three-fold: (i) fast convergence: it
converges with only $ O(\log (1 / \epsilon)) $
iterations, which is the fastest rate among all the
existing algorithms; (ii) low delay: it guarantees
optimal utility with finite queue length; (iii) simple
implementation: the control variables of this algorithm
are based on virtual queues that do not require
maintaining per-flow information. The new technique
builds on a kind of inexact Uzawa method in the
Alternating Direction Method of Multipliers (ADMM). A
theoretical contribution of independent interest is a
new pathway we provide to prove global and linear
convergence rate of Uzawa-ADMM without requiring the
full rank assumption of the constraint matrix.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aghajani:2018:PMA,
author = "Reza Aghajani and Xingjie Li and Kavita Ramanan",
title = "The {PDE} Method for the Analysis of Randomized Load
Balancing Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "132--134",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219672",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a new framework for the analysis of
large-scale load balancing networks with general
service time distributions, motivated by applications
in server farms, distributed memory machines, cloud
computing and communication systems. For a parallel
server network using the so-called $SQ(d)$ load
balancing routing policy, we use a novel representation
for the state of the system and identify its fluid
limit, when the number of servers goes to infinity and
the arrival rate per server tends to a constant. The
fluid limit is characterized as the unique solution to
a countable system of coupled partial differential
equations (PDE), which serve to approximate transient
Quality of Service parameters such as the expected
virtual waiting time and queue length distribution. In
the special case when the service time distribution is
exponential, our method recovers the well-known
ordinary differential equation characterization of the
fluid limit. Furthermore, we develop a numerical scheme
to solve the PDE, and demonstrate the efficacy of the
PDE approximation by comparing it with Monte Carlo
simulations. We also illustrate how the PDE can be used
to gain insight into the performance of large networks
in practical scenarios by analyzing relaxation times in
a backlogged network. In particular, our numerical
approximation of the PDE uncovers two interesting
properties of relaxation times under the SQ(2)
algorithm. Firstly, when the service time distribution
is Pareto with unit mean, the relaxation time decreases
as the tail becomes heavier. This is a priori
counterintuitive given that for the Pareto
distribution, heavier tails have been shown to lead to
worse tail behavior in equilibrium. Secondly, for unit
mean light-tailed service distributions such as the
Weibull and lognormal, the relaxation time decreases as
the variance increases. This is in contrast to the
behavior observed under random routing, where the
relaxation time increases as the variance increases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2018:SRL,
author = "Sen Yang and Bill Lin and Jun Xu",
title = "Safe Randomized Load-Balanced Switching by Diffusing
Extra Loads",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "135--137",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219673",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Load-balanced switch architectures are known to be
scalable in both size and speed, which is of interest
due to the continued exponential growth in Internet
traffic. However, the main drawback of load-balanced
switches is that packets can depart out of order from
the switch. Randomized load-balancing of application
flows by means of hashing on the packet header is a
well-known simple solution to this packet reordering
problem in which all packets belonging to the same
application flow are routed through the same
intermediate port and hence the same path through the
switch. Unfortunately, this method of load-balancing
can lead to instability, depending on the mix of flow
sizes and durations in the group of flows that gets
randomly assigned to route through the same
intermediate port. In this paper, we show that the
randomized load-balancing of application flows can be
enhanced to provably guarantee both stability and
packet ordering by extending the approach with safety
mechanisms that can uniformly diffuse packets across
the switch whenever there is a build-up of packets
waiting to route through some intermediate port.
Our experimental results show that, despite its
simplicity, our extended randomized load-balancing
approach outperforms existing load-balanced switch
architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:2018:AOL,
author = "Debankur Mukherjee and Sem Borst and Johan S. H. van
Leeuwaarden",
title = "Asymptotically Optimal Load Balancing Topologies",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "138--138",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219674",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a system of $N$ servers inter-connected
by some underlying graph topology $G_N$. Tasks with
unit-mean exponential processing times arrive at the
various servers as independent Poisson processes of
rate $\lambda$. Each incoming task is irrevocably assigned
to whichever server has the smallest number of tasks
among the one where it appears and its neighbors in
$G_N$. The above model arises in the context of load
balancing in large-scale cloud networks and data
centers, and has been extensively investigated in the
case where $G_N$ is a clique. Since the servers are
exchangeable in that case, mean-field limits apply, and
in particular it has been proved that for any $ \lambda
< 1 $, the fraction of servers with two or more tasks
vanishes in the limit as $ N \to \infty $. For an
arbitrary graph $G_N$, mean-field techniques break
down, complicating the analysis, and the queue length
process tends to be worse than for a clique.
Accordingly, a graph $G_N$ is said to be $N$-optimal or
$ \sqrt {N}$-optimal when the queue length process on $G_N$
is equivalent to that on a clique on an $N$-scale or
$ \sqrt {N}$-scale, respectively. We prove that if $G_N$ is
an Erd{\H o}s--R{\'e}nyi random graph with average degree $
d(N)$, then with high probability it is $N$-optimal and
$ \sqrt {N}$-optimal if $ d(N) \to \infty $ and $ d(N) /
(\sqrt {N} \log (N)) \to \infty $ as $ N \to \infty $,
respectively. This demonstrates that optimality can be
maintained at $N$-scale and $ \sqrt {N}$-scale while
reducing the number of connections by nearly a factor
$N$ and $ \sqrt {N} / \log (N)$ compared to a clique, provided
the topology is suitably random. It is further shown
that if $G_N$ contains $ \Theta (N)$ bounded-degree
nodes, then it cannot be $N$-optimal. In addition, we
establish that an arbitrary graph $G_N$ is $N$-optimal
when its minimum degree is $ N - o(N)$, and may not be
$N$-optimal even when its minimum degree is $ c N +
o(N)$ for any $ 0 < c < 1 / 2$. Simulation experiments
are conducted for various scenarios to corroborate the
asymptotic results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
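The assignment rule in this model (join the shortest queue among a server and
its graph neighbors) is simple to state in code. The sketch below is our own
illustration under an assumed adjacency-list representation of $G_N$:

    def assign_on_graph(queues, neighbors, arrival_server):
        """Assign the incoming task to the server with the fewest tasks among
        the server where it arrives and that server's neighbors in the graph.
        `neighbors` maps a server index to an iterable of neighbor indices."""
        candidates = [arrival_server, *neighbors[arrival_server]]
        target = min(candidates, key=lambda i: queues[i])
        queues[target] += 1
        return target

On a clique, `neighbors[i]` contains every other server and the rule reduces
to join-shortest-queue; the paper asks how sparse the graph can be while
preserving clique-like behavior.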
@Article{Hegde:2018:ASP,
author = "Nidhi Hegde",
title = "{ACM Sigmetrics Performance Evaluation Review}: a New
Series on Diversity",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "139--139",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219675",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This editorial announces a new series on diversity in
the ACM Sigmetrics Performance Evaluation Review (PER).
In several upcoming and future issues we will feature
invited articles on diversity from authors in the
performance evaluation community, but also from the
larger Computing Science (CS) community. The articles
will touch on various aspects of CS, including K--12 and
post-secondary education, graduate studies, academic
recruitment, industry perspectives, harassment issues,
and gender, ethnicity, and racial bias.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:2018:DFR,
author = "Leana Golubchik and Mallory Redel",
title = "Diversity in Faculty Recruiting: a {WiSE} Approach",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "140--142",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3219676",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this article, we share approaches and practices
that we have found to be successful in supporting women
in STEM fields, based on our experience with the
University of Southern California's (USC) Women in
Science and Engineering Program (WiSE). Specifically,
we focus on faculty recruitment and retention, as
faculty makeup and their success affect the entire
pipeline, including graduate and undergraduate
students.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fanti:2018:SDL,
author = "Giulia Fanti",
title = "Session details: Load Balancing",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3258594",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2018:SDR,
author = "Nicolas Gast",
title = "Session details: Resource Management {II}",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3258595",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:2018:SDL,
author = "Leana Golubchik",
title = "Session details: Learning {I}",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3258590",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2018:SDS,
author = "Mor Harchol-Balter",
title = "Session details: Scheduling {I}",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3258589",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Houdt:2018:SDN,
author = "Benny {Van Houdt}",
title = "Session details: Networking",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3258591",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maguluri:2018:SDE,
author = "Siva Theja Maguluri",
title = "Session details: Emerging Areas",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3258587",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Misra:2018:SDR,
author = "Vishal Misra",
title = "Session details: Resource Management {I}",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3258588",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ren:2018:SDS,
author = "Shaolei Ren",
title = "Session details: Systems",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3258593",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:2018:SDL,
author = "Devavrat Shah",
title = "Session details: Learning {II}",
journal = j-SIGMETRICS,
volume = "46",
number = "1",
pages = "",
month = jun,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3292040.3258592",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:57 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2018:SIW,
author = "Mark S. Squillante",
title = "Special Issue on {The Workshop on MAthematical
performance Modeling and Analysis (MAMA 2018)}",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "2--2",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305220",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important role
in research studies to investigate fundamental issues
and tradeoffs at the core of performance problems in
the design and implementation of complex computer
systems, networks and applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xie:2018:MDP,
author = "Hong Xie and John C. S Lui",
title = "A {Markov} Decision Process Approach to Analyze
Discount \& Reputation Trade-offs in E-commerce
Systems",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "3--5",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305221",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "E-commerce systems, e.g., Amazon, eBay and Taobao, are
becoming increasingly popular. We consider eBay (or
Taobao) like E-commerce systems, where a large number
of sellers and buyers transact online. To reflect the
trustworthiness of sellers, a reputation system is
maintained. In particular, the feedback-based
reputation system [4] is the most widely deployed.
Sellers of such systems are initialized with a low
reputation and they must obtain a sufficiently large
number of positive feedbacks from buyers to earn a
reputable label. For example, eBay and Taobao use
three-level feedbacks, i.e., (-1 (Negative), 0
(Neutral), 1 (Positive)). Each seller is initialized
with a reputation score of zero. A positive (or
negative) rating increases (or decreases) the
reputation score by one, while a neutral rating does
not change the reputation score.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2018:MAC,
author = "Yudong Yang and Vishal Misra and Dan Rubenstein",
title = "A Modeling Approach to Classifying Malicious Cloud
Users via Shuffling",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "6--8",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305222",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "DDoS attacks are still a serious security issue on the
Internet. We explore a distributed Cloud setting in
which users are mapped to servers where malicious users
mapped to the same server can thwart the performance of
legitimate users. By periodically shuffling the mapping
of users to servers and observing how this affects
successfully attacked servers, the malicious users can
be identified. We use simple models to understand how
to best score these observations to identify malicious
users with well-defined levels of confidence.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2018:SMSa,
author = "Isaac Grosof and Ziv Scully and Mor Harchol-Balter",
title = "{SRPT} for Multiserver Systems",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "9--11",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305223",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Shortest Remaining Processing Time (SRPT)
scheduling policy and variants thereof have been
deployed in many computer systems, including web
servers [5], networks [9], databases [3] and operating
systems [1]. SRPT has also long been a topic of
fascination for queueing theorists due to its
optimality properties. In 1966, the mean response time
for SRPT was first derived [11], and in 1968 SRPT was
shown to minimize mean response time in both a
stochastic sense and a worst-case sense [10]. However,
these beautiful optimality results and the analysis of
SRPT are only known for single-server systems. Almost
nothing is known about SRPT in multiserver systems,
such as the M/G/k, even for the case of just k = 2
servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
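For readers unfamiliar with the policy itself: in a $k$-server system, the
natural multiserver SRPT rule serves, at every instant, the (up to) $k$ jobs
with the smallest remaining processing times. A minimal Python sketch of our
own, purely for illustration:

    import heapq

    def srpt_in_service(remaining, k):
        """Return the indices of the (up to) k jobs with the smallest remaining
        processing times; these are the jobs placed in service under multiserver SRPT."""
        return heapq.nsmallest(k, range(len(remaining)), key=lambda i: remaining[i])

For example, srpt_in_service([4.0, 0.5, 2.0], 2) returns [1, 2]; the analysis
challenge discussed in the abstract is what this preemptive rule does to mean
response time when k > 1.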
@Article{Nakahira:2018:MVDa,
author = "Yorie Nakahira and Andres Ferragut and Adam Wierman",
title = "Minimal-variance distributed scheduling under strict
demands and deadlines",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "12--14",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305224",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many modern schedulers can dynamically adjust their
service capacity to match the incoming workload. At the
same time, however, variability in service capacity
often incurs operational and infrastructure costs. In
this abstract, we characterize an optimal distributed
algorithm that minimizes service capacity variability
when scheduling jobs with deadlines. Specifically, we
show that Exact Scheduling minimizes service capacity
variance subject to strict demand and deadline
requirements under stationary Poisson arrivals.
Moreover, we show how close the performance of the
optimal distributed algorithm is to that of the optimal
centralized algorithm by deriving a
competitive-ratio-like bound.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2018:SSS,
author = "Xin Liu and Lei Ying",
title = "A Simple Steady-State Analysis of Load Balancing
Algorithms in the Sub-{Halfin--Whitt} Regime",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "15--17",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305225",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies the steady-state performance of
load balancing algorithms in many-server systems. We
consider a system with N identical servers with buffer
size $ b - 1 $ such that $ b = o(\sqrt N) $, in other
words, each server can hold at most $b$ jobs, one job
in service and $ b - 1$ jobs in buffer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:2018:JIQ,
author = "Debankur Mukherjee and Alexander Stolyar",
title = "Join-Idle-Queue with Service Elasticity",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "18--20",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305226",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the model of a token-based joint
auto-scaling and load balancing strategy, proposed in a
recent paper by Mukherjee, Dhara, Borst, and van
Leeuwaarden [4] (SIGMETRICS '17), which offers an
efficient scalable implementation and yet achieves
asymptotically optimal steady-state delay performance
and energy consumption as the number of servers $ N \to \infty $.
In the above work, the asymptotic results are obtained
under the assumption that the queues have fixed-size
finite buffers, and therefore the fundamental question
of stability of the proposed scheme with infinite
buffers was left open. In this paper, we address this
fundamental stability question. The system stability
under the usual subcritical load assumption is not
automatic. Moreover, the stability may not even hold
for all N. The key challenge stems from the fact that
the process lacks monotonicity, which has been the
powerful primary tool for establishing stability in
load balancing models. We develop a novel method to
prove that the subcritically loaded system is stable
for large enough N, and establish convergence of
steady-state distributions to the optimal one, as N !
1. The method goes beyond the state of the art
techniques it uses an induction-based idea and a `weak
monotonicity' property of the model; this technique is
of independent interest and may have broader
applicability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sun:2018:FAH,
author = "Xiao Sun and Tan N. Le and Mosharaf Chowdhury and
Zhenhua Liu",
title = "Fair Allocation of Heterogeneous and Interchangeable
Resources",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "21--23",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305227",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by the proliferation of heterogeneous
processors such as multi-core CPUs, GPUs, TPUs, and
other accelerators for machine learning, we formulate a
novel multi-interchangeable resource allocation (MIRA)
problem where some resources are interchangeable. The
challenge is how to allocate interchangeable resources
to users in a sharing system while maintaining
desirable properties such as sharing incentive, Pareto
efficiency, and envy-freeness. In this paper, we first
show that existing algorithms, including the Dominant
Resource Fairness used in production systems, fail to
provide these properties for interchangeable resources.
Then we characterize the tradeoff between performance
and strategyproofness, and design the Budget-based
(BUD) algorithm, which preserves Pareto efficiency,
sharing incentive, and envy-freeness while providing
better performance over currently used algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ayesta:2018:RDC,
author = "Urtzi Ayesta",
title = "On redundancy-$d$ with cancel-on-start a.k.a
Join-shortest-work ($d$)",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "24--26",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305228",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Using redundancy to minimize latency in parallel
server systems has become very popular in recent years
[1-6]. While there are several variants of a
redundancy-based system, the general notion of
redundancy is to create multiple copies of the same job
that will be sent to a subset of servers. By allowing
for redundant copies, the aim is to minimize the system
latency by exploiting the variability in the queue
lengths of the different queues. Several recent works,
both empirically [1,2] and theoretically [3-6], have
provided indications that redundancy can help in
reducing the response time of a system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Panigrahy:2018:QTM,
author = "Nitish K. Panigrahy and Prithwish Basu and Don Towsley
and Ananthram Swami and Kevin S. Chan and Kin K.
Leung",
title = "A queueing-theoretic model for resource allocation in
one-dimensional distributed analytics network",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "27--29",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305229",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of allocating requesters of
analytic tasks to resources on servers. We assume both
requesters and servers are placed on a one-dimensional
line $[0,1)$ according to two Poisson processes, with
each server having finite capacity. Requesters
communicate with servers under a noninterference
wireless protocol. We consider a ``Move to Right''
(MTR) request allocation strategy where each requester
is allocated to the nearest available server to its
right. We start our analysis from a single resource per
request scenario where each requester demands a single
computational resource. We map this scenario to an
M/M/1 queue or a bulk service M/M/1 queue depending on
the server capacity. We compare the performance of the
MTR strategy with the globally optimal strategy taking
``expected distance traveled by a request'' (request
distance) as performance metric. Next, we extend our
analysis to two resources per request scenario. We show
that the behavior of MTR can be transformed into an
equivalent fork-join queue problem. Numerical results
are presented to validate the claim.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2018:RMFb,
author = "Nicolas Gast and Diego Latella and Mieke Massink",
title = "A Refined Mean Field Approximation for Synchronous
Population Processes",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "30--32",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305230",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mean field approximation is a popular method to study
the behaviour of stochastic models composed of a large
number of interacting objects. When the objects are
asynchronous, the mean field approximation of a
population model can be expressed as an ordinary
differential equation. When the objects are synchronous
the mean field approximation is a discrete time
dynamical system. In this paper, we focus on the
latter. We show that, similarly to the asynchronous
case, the mean field approximation of a synchronous
population can be refined by a term in $1/N$. Our result
holds for finite time horizons and in steady state. We
provide two examples that illustrate the approach and
its limit.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
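Schematically, and in notation we introduce here only for illustration, the
refinement described in the abstract replaces the plain mean field estimate by
one with an explicit $1/N$ correction:

  \mathbb{E}\bigl[X^{(N)}(t)\bigr] \;=\; x(t) \;+\; \frac{v(t)}{N} \;+\; o\!\left(\frac{1}{N}\right),

where $x(t)$ is the (discrete-time) mean field approximation of the synchronous
population process and $v(t)$ is a computable correction term; per the abstract,
the analogous refinement also holds in steady state.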
@Article{Shneer:2018:SSD,
author = "Seva Shneer and Alexander Stolyar",
title = "Stability of a standard decentralised medium access",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "33--35",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305231",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a stochastic queueing system modelling the
behaviour of a wireless network with nodes employing a
discrete-time version of the standard decentralised
medium access algorithm. The system is unsaturated:
each node receives an exogenous flow of packets at the
rate $\lambda$ packets per time slot. Each packet takes one
slot to transmit, but neighbouring nodes cannot
transmit simultaneously. The algorithm we study is
standard in that: a node with empty queue does not
compete for medium access; the access procedure by a
node does not depend on its queue length, as long as it
is non-zero. Two system topologies are considered, with
nodes arranged in a circle and in a line. We prove
that, for either topology, the system is stochastically
stable under condition $ \lambda < 2 / 5 $. This result is
intuitive for the circle topology as the throughput
each node receives in a saturated system (with infinite
queues) is equal to the so-called parking constant,
which is larger than 2/5. (This fact, however, does not
help to prove our result.) The result is not intuitive
at all for the line topology as in a saturated system
some nodes receive a throughput lower than 2/5.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sabnis:2018:OOB,
author = "Anirudh Anirudh Sabnis and Ramesh K. Sitaraman and
Donald Towsley",
title = "{OCCAM}: an Optimization Based Approach to Network
Inference",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "36--38",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305232",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the problem of inferring the structure of a
communication network based only on network
measurements made from a set of hosts situated at the
network periphery. Our novel approach called ``OCCAM''
is based on the principle of Occam's razor and finds
the ``simplest'' network that explains the observed
network measurements. OCCAM infers the internal
topology of a communication network, including the
internal nodes and links of the network that are not
amenable to direct measurement. In addition to network
topology, OCCAM infers the routing paths that packets
take between the hosts. OCCAM uses path metrics
measurable from the hosts and expresses the observed
measurements as constraints of a mixed-integer bilinear
optimization problem that can then be feasibly solved
to yield the network topology and the routing paths. We
empirically validate OCCAM on a wide variety of
real-world ISP networks and show that its inferences
agree closely with the ground truth. Specifically,
OCCAM infers the topology with an average network
similarity score of 93\% and infers routing paths with
a path edit distance of 0.20. Further, OCCAM is robust
to error in its measured path metric inputs, producing
high quality inferences even when 20--30\% of its
inputs are erroneous. Our work is a significant advance
in network tomography as it proposes and empirically
evaluates the first method that infers the complete
network topology, rather than just logical routing
trees from sources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qin:2018:CPIa,
author = "Junjie Qin and Ram Rajagopal and Shai Vardi and Adam
Wierman",
title = "Convex Prophet Inequalities",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "39--41",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305233",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a new class of prophet
inequalities, convex prophet inequalities, where a
gambler observes a sequence of convex cost functions
$c_i(x_i)$ and is required to assign some fraction $ 0 \leq
x_i \leq 1 $ to each, such that the sum of assigned
values is exactly 1. The goal of the gambler is to
minimize the sum of the costs. We provide an optimal
algorithm for this problem, a dynamic program, and show
that it can be implemented in polynomial time when the
cost functions are polynomial. We also precisely
characterize the competitive ratio of the optimal
algorithm in the case where the gambler has an outside
option and there are polynomial costs, showing that it
grows as $ \Theta (n^{p - 1} / l) $, where $n$ is the
number of stages, $p$ is the degree of the polynomial
costs and the coefficients of the cost functions are
bounded by $ [l, u]$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goel:2018:SOC,
author = "Gautam Goel",
title = "Smoothed Online Convex Optimization via Online
Balanced Descent",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "42--44",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305234",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study smoothed online convex optimization, a
version of online convex optimization where the learner
incurs a penalty for changing her actions between
rounds. Given a $ \Omega (p d) $ lower bound on the
competitive ratio of any online algorithm, where d is
the dimension of the action space, we ask under what
conditions this bound can be beaten. We introduce a
novel algorithmic framework for this problem, Online
Balanced Descent (OBD), which works by iteratively
projecting the previous point onto a carefully chosen
level set of the current cost function so as to balance
the switching costs and hitting costs. We demonstrate
the generality of the OBD framework by showing how,
with different choices of ``balance,'' OBD can improve
upon state-of-the-art performance guarantees for both
competitive ratio and regret; in particular, OBD is the
first algorithm to achieve a dimension-free competitive
ratio, $ 3 + O(1 / \alpha) $, for locally polyhedral
costs, where $ \alpha $ measures the ``steepness'' of the
costs. We also prove bounds on the dynamic regret of
OBD when the balance is performed in the dual space
that are dimension-free and imply that OBD has
sublinear static regret.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghosh:2018:MMO,
author = "Soumyadip Ghosh and Mark S. Squillante and Ebisa D.
Wollega",
title = "On Min-Max Optimization Over Large Data Sets",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "45--47",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305235",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a general min-max optimization formulation
defined over a sample space X, probability distribution
P on X",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thai:2018:ASI,
author = "My T. Thai and Arun Sen and Arun Das",
title = "{ACM SIGMETRICS} International Workshop on Critical
Infrastructure Network Security",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "48--49",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305237",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The second ACM SIGMETRICS International Workshop on
Critical Infrastructure Network Security took place in
Irvine, California, USA on June 18th 2018 in
conjunction with ACM SIGMETRICS 2018. As in the
previous year, the workshop received widespread
community support and as a consequence we were able to
conduct a successful workshop. The workshop provided a
forum for scientists and engineers from research
institutions and universities to present their findings
on critical infrastructure security. It also provided a
forum for an exchange of ideas and in-depth discussions
on the future research directions in this area. The
workshop featured several invited and contributed talks
from researchers from institutions such as the
University of Illinois at Urbana-Champaign, Princeton
University, Sandia National Laboratories, Delft
University (The Netherlands), ENEA (Italy),
Northeastern University, Hamburg University (Germany),
National University of Singapore, California Institute
of Technology, New York University and University of
Kentucky. To disseminate the outcome of this workshop,
we are pleased to include extended abstracts of
selected workshop presentations in Performance
Evaluation Review (PER), the quarterly publication of
ACM SIGMETRICS. A brief overview of the included papers
is outlined below.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Soltan:2018:RCP,
author = "Saleh Soltan and Mihalis Yannakakis and Gil Zussman",
title = "{REACT} to Cyber-Physical Attacks on Power grids
(Extended Abstract)",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "50--51",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study cyber attacks on power grids that affect both
the physical infrastructure and the data at the control
center, which therefore are cyber-physical in nature.
In particular, we assume that an adversary attacks an
area by: (i) remotely disconnecting some lines within
the attacked area, and (ii) modifying the information
received from the attacked area to mask the line
failures and hide the attacked area from the control
center. For the latter, we consider two types of
attacks: (i) data distortion: which distorts the data
by adding powerful noise to the actual data, and (ii)
data replay: which replays locally consistent old
data instead of the actual data. We use the DC power
flow model and prove that the problem of finding the
set of line failures given the phase angles of the
nodes outside of the attacked area is strongly NP-hard,
even when the attacked area is known. However, we
introduce the polynomial time REcurrent Attack
Containment and deTection (REACT) Algorithm to
approximately detect the attacked area and line
failures after a cyber-physical attack.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huang:2018:ASC,
author = "Linan Huang and Quanyan Zhu",
title = "Adaptive Strategic Cyber Defense for Advanced
Persistent Threats in Critical Infrastructure
Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "52--56",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305239",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advanced Persistent Threats (APTs) have created new
security challenges for critical infrastructures due to
their stealthy, dynamic, and adaptive natures. In this
work, we aim to lay a game-theoretic foundation by
establishing a multi-stage Bayesian game framework to
capture incomplete information of deceptive APTs and
their multistage multi-phase movement. The analysis of
the perfect Bayesian Nash equilibrium (PBNE) enables a
prediction of attacker's behaviors and a design of
defensive strategies that can deter the adversaries and
mitigate the security risks. A conjugate-prior method
allows online computation of the belief and reduces
Bayesian update into an iterative parameter update. The
forwardly updated parameters are assimilated into the
backward dynamic programming computation to
characterize a computationally tractable and
time-consistent equilibrium solution based on the
expanded state space. The Tennessee Eastman (TE)
process control problem is used as a case study to
demonstrate the dynamic game under the information
asymmetry and show that APTs tend to be stealthy and
deceptive during their transitions in the cyber layer
and behave aggressively when reaching the targeted
physical plant. The online update of the belief allows
the defender to learn the behavior of the attacker and
choose strategic defensive actions that can thwart
adversarial behaviors and mitigate APTs. Numerical
results illustrate the defender's tradeoff between the
immediate reward and the future expectation as well as
the attacker's goal to reach an advantageous system
state while making the defender form a positive
belief.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Guo:2018:FLPa,
author = "Linqi Guo and Chen Liang and Alessandro Zocca and
Steven H. Low and Adam Wierman",
title = "Failure Localization in Power Systems via Tree
Partitions",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "57--61",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305240",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cascading failures in power systems propagate
non-locally, making the control and mitigation of
outages extremely hard. In this work, we use the
emerging concept of the tree partition of transmission
networks to provide an analytical characterization of
line failure localizability in transmission systems.
Our results rigorously establish the well-perceived
intuition in the power community that failures cannot cross
bridges, and reveal a finer-grained concept that
encodes more precise information on failure
propagations within tree-partition regions.
Specifically, when a non-bridge line is tripped, the
impact of this failure only propagates within
well-defined components, which we refer to as cells, of
the tree partition defined by the bridges. In contrast,
when a bridge line is tripped, the impact of this
failure propagates globally across the network,
affecting the power flow on all remaining transmission
lines. This characterization suggests that it is
possible to improve the system robustness by
temporarily switching off certain transmission lines,
so as to create more, smaller components in the tree
partition; thus spatially localizing line failures and
making the grid less vulnerable to large-scale outages.
We illustrate this approach using the IEEE 118-bus test
system and demonstrate that switching off a negligible
portion of transmission lines allows the impact of line
failures to be significantly more localized without
substantial changes in line congestion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Oostenbrink:2018:ELD,
author = "Jorik Oostenbrink and Fernando A. Kuipers and Poul E.
Heegaard and Bjarne E. Helvik",
title = "Evaluating Local Disaster Recovery Strategies",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "62--66",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305241",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is of vital importance to maintain at least some
network functionality after a disaster, for example by
temporarily replacing damaged nodes by emergency nodes.
We propose a framework to evaluate different node
replacement strategies, based on a large set of
representative disasters. We prove that computing the
optimal choice of nodes to replace is an NP-hard
problem and propose several simple strategies. We
evaluate these strategies on two U.S. topologies and
show that a simple greedy strategy can perform close to
optimal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Khamfroush:2018:VII,
author = "Hana Khamfroush and Samuel Lofumbwa Iloo and Mahshid
Rahnamay-Naeini",
title = "Vulnerability of Interdependent Infrastructures Under
Random Attacks",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "67--71",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305242",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most of today's critical infrastructure are in the
form of interdependent networks with new
vulnerabilities attributed to their interdependencies.
Security and reliability attacks, which can trigger
failures within and across these networks, will have
different forms and impacts on interdependent networks.
In this paper, we focus on random attacks in a
two-layer interdependent network and quantify its
vulnerability under two different types of such
attacks: (1) single layer attack, (2) concurrent
two-layer attack. We compare the vulnerability of the
network given the two attack scenarios, to answer the
question of whether one single attack in one layer of
an interdependent network can be as severe as
concurrent multi-layer attack? We define two
quantitative metrics to evaluate the vulnerability of
the interdependent network under a given attack,
namely, long-term effect and critical times. The
long-term effect represents the total number of
affected nodes during a finite time window, while
critical times capture the evolution of failure
propagation over time. The impact of different types of
network topologies, including Erd{\H{o}}s--R{\'e}nyi,
Scale-Free, and Small-World, and different coupling
scenarios between the layers, namely high and low
intensity and random and designed coupling, on the
vulnerability of the network is studied. Our results
show that two-layer attacks are more severe in most
cases while a single attack in one layer can be more
severe for certain scenarios of coupling and network
topologies. This suggests that in interdependent
networks severe attacks can be triggered using access
to only one layer of the network if the network
structure is vulnerable.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kelic:2018:CRC,
author = "Andjelka Kelic",
title = "Cyber Risk in Critical Infrastructure",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "72--75",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305243",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Existing approaches to evaluating cyber risk are
summarized and explored for their applicability to
critical infrastructure. The approaches cluster in
three different spaces: network security,
cyber-physical, and mission assurance. In all
approaches, some form of modeling is utilized at
varying levels of detail, while the ability to
understand consequence varies, as do interpretations of
risk. A hybrid approach can account for cyber risk in
critical infrastructure and allow for allocation of
limited resources across the entirety of the risk
spectrum.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2018:SIW,
author = "Zhenhua Liu and Ganesh Ananthanarayanan",
title = "Special Issue on the Work-in-Progress {(WIP)} session
at the {ACM SIGMETRICS 2018 Conference}",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "76--76",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305245",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ACM SIGMETRICS 2018 conference was held in Irvine,
CA, USA. For the first time, we organized the
Work-in-Progress (WIP) session. This session provided a
great opportunity to present early-stage research to
receive valuable feedback from the community through a
dedicated poster session. Topics fell within the
purview of the SIGMETRICS main conference. Within these
topics, we particularly encouraged (but were not
limited to) submissions that are pursuing new, perhaps
controversial, directions. We also welcomed submissions
of posters corresponding to results presented at the
SIGMETRICS-affiliated workshops. A new category of
posters we introduced, unlike poster sessions at other
conferences, is about systems published at related
conference venues (such as SOSP, SIGCOMM, NSDI) that
could benefit from a thorough analytical modeling.
The work in conferences like SOSP, SIGCOMM and NSDI tends
to focus on the systemic architecture but often does not
do a thorough job of analytically grounding its
solutions. For submissions in this category, we asked
the authors to clearly explain why the poster is
relevant to the SIGMETRICS community. Submissions were
judged based on their novelty as well as the potential
to generate discussions. Eleven submissions were
accepted with attendance from the USA and Asia. Topics
range from cloud computing, power systems, security, to
vehicular caching.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Le:2018:BMB,
author = "Tan N. Le and Xiao Sun and Mosharaf Chowdhury and
Zhenhua Liu",
title = "{BoPF}: Mitigating the Burstiness-Fairness Tradeoff in
Multi-Resource Clusters",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "77--78",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305246",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Even though batch, interactive, and streaming
applications all care about performance, their notions
of performance are different. For instance, while the
average completion time can sufficiently capture the
performance of a throughput-sensitive batch-job queue
(TQ) [5], interactive sessions and streaming
applications form latency-sensitive queues (LQ): each
LQ is a sequence of small jobs following an ON-OFF
pattern. For these jobs [7], individual completion
times or latencies are far more important than the
average completion time or the throughput of the LQ.
Indeed, existing ``fair'' schedulers are inherently
unfair to LQ jobs: when LQ jobs are present (ON state),
they must share the resources equally with TQ jobs, but
when they are absent (OFF state), batch jobs get all
the resources. In the long run, TQs receive more
resources than their fair shares because today's
schedulers such as Dominant Resource Fairness [4] make
instantaneous decisions. Clearly, it is impossible to
achieve the best response time for LQ jobs under
instantaneous fairness. In other words, there is a hard
tradeoff between providing instantaneous fairness for
TQs and minimizing the response time of LQs. However,
instantaneous fairness is not necessary for TQs because
average completion time over a relatively long time
horizon is their most important metric. This sheds
light on the following question: how well can we
simultaneously accommodate multiple classes of
workloads with performance guarantees, in particular,
isolation protection for TQs in terms of long-term
fairness and low response times for LQs? This work
serves as our first step in answering the question by
designing BoPF: the first multi-resource scheduler that
achieves both isolation protection for TQs and response
time guarantees for LQs in a strategy-proof way. The
key idea is ``bounded'' priority for LQs: as long as
the burst is not too large to hurt the long-term fair
share of TQs and other LQs, they are given higher
priority so jobs can be completed as quickly as
possible.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Guo:2018:FLPb,
author = "Linqi Guo and Chen Liang and Alessandro Zocca and
Steven H. Low and Adam Wierman",
title = "Failure Localization in Power Systems via Tree
Partitions",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "79--80",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305247",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cascading failures in power systems propagate
non-locally, making the control and mitigation of
outages extremely hard. In this work, we use the
emerging concept of the tree partition of transmission
networks to provide an analytical characterization of
line failure localizability in transmission systems.
Our results rigorously formalize the well-known
intuition that failures cannot cross bridges, and
reveal a finer-grained concept that encodes more
precise information on failure propagation within
tree-partition regions. Specifically, when a non-bridge
line is tripped, the impact of this failure only
propagates within components of the tree partition
defined by the bridges. In contrast, when a bridge line
is tripped, the impact of this failure propagates
globally across the network, affecting the power flow
on all remaining lines. This characterization suggests
that it is possible to improve the system robustness by
temporarily switching off certain transmission lines,
so as to create more, smaller components in the tree
partition; thus spatially localizing line failures and
making the grid less vulnerable to large outages.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hurtado-Lange:2018:NVD,
author = "Daniela Hurtado-Lange and Siva Theja Maguluri",
title = "A Novel View of the Drift: Method for Heavy Traffic
Limits of Queueing Systems",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "81--82",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305248",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The drift method has been recently developed to study
queueing systems in heavy traffic [3]. This method was
successfully used to obtain the heavy traffic scaled
sum queue lengths of several systems, even when the
so-called complete resource pooling condition is not
satisfied. In this poster, we present a novel view of
the drift method to explain why this method works. We
believe that this view can be exploited to obtain the
joint distribution of the steady-state heavy-traffic
scaled queue lengths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ruan:2018:EVV,
author = "Yichen Ruan and Carlee Joe-Wong",
title = "On the Economic Value of Vehicular Caching",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "83--84",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305249",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The economic value of a new mobile caching method
utilizing vehicles is studied. An optimization model is
built using stochastic geometry tools. Two possible
choices of utility functions are discussed together
with some preliminary results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qin:2018:CPIb,
author = "Junjie Qin and Ram Rajagopal and Shai Vardi and Adam
Wierman",
title = "Convex Prophet Inequalities",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "85--86",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305250",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a new class of prophet
inequalities-convex prophet inequalities-where a
gambler observes a sequence of convex cost functions ci
(xi ) and is required to assign some fraction $ 0 \leq
\xi \leq 1 $ to each, such that the sum of assigned
values is exactly 1. The goal of the gambler is to
minimize the sum of the costs. We provide an optimal
algorithm for this problem, a dynamic program, and show
that it can be implemented in polynomial time when the
cost functions are polynomial. We also precisely
characterize the competitive ratio of the optimal
algorithm in the case where the gambler has an outside
option and there are polynomial costs, showing that it
grows as $ \Theta (n^{p - 1} / \ell) $, where $n$ is the
number of stages, $p$ is the degree of the polynomial
costs and the coefficients of the cost functions are
bounded by $ [\ell, u] $.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Le:2018:AAA,
author = "Tan N. Le and Xiao Sun and Mosharaf Chowdhury and
Zhenhua Liu",
title = "{AlloX}: Allocation across Computing Resources for
Hybrid {CPU\slash GPU} clusters",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "87--88",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305251",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "GPUs are considered as the accelerators for CPUs. We
call these applications GPU applications. Some machine
learning frameworks like Tensorflow support their
machine learning (ML) jobs running either on CPUs or
GPUs. Nvidia claims that Titan GPU K80 12GB can speed
up 5-10x on average. Although GPUs offer the advantages
on performance, they are very expensive. For example, a
GPU K80 roughly costs \$4000 while an Intel Xeon E5
Quadcores costs \$350. The coexistence of traditional CPU
and GPU applications urges cloud computing operators to
build hybrid CPU/GPU clusters. While the traditional
applications are executed on CPUs, the GPU applications
can run on either CPUs or GPUs. In the CPU/GPU
clusters, how to provision the hybrid CPU/GPU clusters
for CPU and GPU applications and how to allocate the
resources across CPUs and GPUs? Interchangeable
resources like CPUs and GPUs are not rare in large
clusters. Some network I/O cards like wireless,
Ethernet, Infinityband with different bandwidths can
also be interchangeable. In this paper, we focus on
CPU/GPU systems. We develop a tool that estimates the
performance and resource for an ML job in an online
manner (\S 2). We implement the AlloX system that supports
resource allocation and places applications on the right
resources (CPU or GPU) to maximize the use of
computational resources (\S 3). The proposed AlloX
policy achieves up to 35\% progress improvement
compared to default DRF [2]. We build a model that
minimizes the total cost of ownership for CPU/GPU data
centers (\S 4).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zuo:2018:OBP,
author = "Jinhang Zuo and Xiaoxi Zhang and Carlee Joe-Wong",
title = "Observe Before Play: Multi-armed Bandit with
Pre-Observations",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "89--90",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305252",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the stochastic multi-armed bandit (MAB)
problem in a setting where a player can, at a cost,
pre-observe one or multiple arms before playing one of
them in each round. Apart from the classic trade-off
between exploration (trying out more arms to find the
best one) and exploitation (sticking with the arm
believed to offer the highest reward), we encounter an
additional dilemma in each single round, i.e.,
pre-observing more arms gives a higher chance to play
the best one, but incurs a larger cost which decreases
the overall reward. We design an Observe-Before-Play
(OBP) policy for arms with Bernoulli rewards, which
could be generalized to any i.i.d. reward distributions
bounded in [0, 1]. Our strategy could enable a better
policy for secondary spectrum access in Cognitive Radio
Networks, where users can sense multiple channels'
occupancies before choosing one on which to transmit.
To evaluate our policy, we define the regret as the gap
between the expected overall reward gained by our OBP
policy and that obtained by the expected optimum, which
always chooses an optimal sequence of arms to
pre-observe based on the perfect knowledge of the arm
distributions. Experiments show that our OBP policy has
sub-linear regret and can outperform the classical MAB
algorithm when the cost of pre-observations is
relatively low.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2018:MPL,
author = "Justin Wang and Benjamin Berg and Daniel S. Berger and
Siddhartha Sen",
title = "Maximizing Page-Level Cache Hit Ratios in {LargeWeb}
Services",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "91--92",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305253",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Large web services typically serve pages consisting of
many individual objects. To improve the response times
of page-requests, these services store a small set of
popular objects in a fast caching layer. A page-request
is not considered complete until all of its objects
have either been found in the cache or retrieved from a
backend system. Hence, caching only speeds up a page
request if all of its objects are found in the cache.
We seek caching policies that maximize the page-level
hit ratio: the fraction of requests that find all of
their objects in the cache. This work analyzes page
requests served by a Microsoft production system. We
find that in practice there is potential for improving
the page-level hit ratio over existing caching
strategies, but that analytically maximizing the
page-level hit ratio is NP-hard.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shao:2018:FLT,
author = "Zhihui Shao and Mohammad A. Islam and Shaolei Ren",
title = "A First Look at Thermal Attacks in Multi-Tenant Data
Centers",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "93--94",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305254",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper focuses on the emerging threat of thermal
attacks in a multi-tenant data center. It discovers
that a malicious tenant (i.e., attacker) can inject
additional thermal loads beyond the shared cooling
system capacity, thus resulting in overheating and
possible system downtime. Importantly, the attacker can
launch thermal attacks in a stealthy manner by
discharging batteries inside its servers and still
keeping its power drawn from the data center power
distribution system under its subscribed capacity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shen:2018:ENM,
author = "Shan-Hsiang Shen",
title = "An Efficient Network Monitor for {SDN} Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "2",
pages = "95--96",
month = sep,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3305218.3305255",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jan 18 06:03:58 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With growing services running in clouds, it is
critical to defence the services from Distributed
Denial of Service (DDoS) attacks. To this end, network
traffic should be monitored to detect malicious
traffic. Software-defined Networking (SDN) provides a
flexible platform for the network monitoring and relies
on a central controller to ask switches for traffic
statistic to get a global traffic view for security.
However, the control plane resources are limited in SDN
in terms of controller capacity, network bandwidth, and
switch performance. Thus, too much network monitoring
will affect data plane traffic performance. To address
this issue, we propose SDN-Monitor, which carefully
selects switches to monitor to reduce the resource
consumption. Moreover, SDN-Monitorre-routes network
traffic to further reduce the number of monitored
switches.With growing services running in clouds, it is
critical to defence the services from Distributed
Denial of Service (DDoS) attacks. To this end, network
traffic should be monitored to detect malicious
traffic. Software-defined Networking (SDN) provides a
flexible platform for the network monitoring and relies
on a central controller to ask switches for traffic
statistic to get a global traffic view for security.
However, the control plane resources are limited in SDN
in terms of controller capacity, network bandwidth, and
switch performance. Thus, too much network monitoring
will affect data plane traffic performance. To address
this issue, we propose SDN-Monitor, which carefully
selects switches to monitor to reduce the resource
consumption. Moreover, SDN-Monitorre-routes network
traffic to further reduce the number of monitored
switches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tassiulas:2018:ONE,
author = "Leandros Tassiulas",
title = "Optimizing the network edge for flexible service
provisioning",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "1--1",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308899",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The virtualization of network resources provides
unique flexibility in service provisioning in most
levels of the network stack. Softwarization of the
network control and operation (SDN) is a key enabler of
that development. Starting from the network core, SDN
is a dominant trend in the evolution of network
architectures with increased emphasis recently on the
network edge. I will present some recent results in
this area starting with a study on migration from
legacy networking to SDN enabled network modules. The
tradeoff between the benefits of SDN upgrades and the
cost of deployment is addressed and captured by an
appropriate sub-modular function that allows optimizing
the penetration pace of the technology.
Validation on some real world network topologies and
traffic matrices will be presented as well. Then we
move our attention to the network periphery. A wireless
multi-hop extension at the network edge is considered
and the problem of enabling SDN is addressed via
replication of SDN controllers. The delay constraints
of the controlled data-path elements are appropriately
modeled and the problem of locating the controllers is
addressed via optimization and a proof-of-concept
implementation. An alternate approach is considered
then for the wireless network where we assume
coexistence of SDN enabled components with network
islands operating under distributed ad hoc routing
protocols. The trade-off of the coexistence is studied
and the impact of SDN penetration is evaluated. Some
paradigms of collaborative network services are
presented finally as they are enabled by the above
architectural evolution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2018:DAB,
author = "Weina Wang and Mor Harchol-Balter and Haotian Jiang
and Alan Scheller-Wolf and R. Srikant",
title = "Delay Asymptotics and Bounds for Multi-Task Parallel
Jobs",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "2--7",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308901",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study delay of jobs that consist of multiple
parallel tasks, which is a critical performance metric
in a wide range of applications such as data file
retrieval in coded storage systems and parallel
computing. In this problem, each job is completed only
when all of its tasks are completed, so the delay of a
job is the maximum of the delays of its tasks. Despite
the wide attention this problem has received, tight
analysis is still largely unknown since analyzing job
delay requires characterizing the complicated
correlation among task delays, which is hard to do. We
first consider an asymptotic regime where the number of
servers, n, goes to infinity, and the number of tasks
in a job, k(n), is allowed to increase with n. We
establish the asymptotic independence of any k(n)
queues under the condition $ k(n) = o(n^{1 / 4}) $. This greatly
generalizes the asymptotic-independence type of results
in the literature where asymptotic independence is
shown only for a fixed constant number of queues. As a
consequence of our independence result, the job delay
converges to the maximum of independent task delays. We
next consider the non-asymptotic regime. Here we prove
that independence yields a stochastic upper bound on
job delay for any n and any k(n) with $ k(n) \leq n $. The
key component of our proof is a new technique we
develop, called ``Poisson oversampling''. Our approach
converts the job delay problem into a corresponding
balls-and-bins problem. However, in contrast with
typical balls-and-bins problems where there is a
negative correlation among bins, we prove that our
variant exhibits positive correlation. A full version
of this paper with all proofs appears in [28].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2018:SMSb,
author = "Isaac Grosof and Ziv Scully and Mor Harchol-Balter",
title = "{SRPT} for Multiserver Systems",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "8--9",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Shortest Remaining Processing Time (SRPT)
scheduling policy and its variants have been
extensively studied in both theoretical and practical
settings. While beautiful results are known for
single-server SRPT, much less is known for multiserver
SRPT. In particular, stochastic analysis of the M/G/k
under SRPT is entirely open. Intuition suggests that
multiserver SRPT should be optimal or near-optimal for
minimizing mean response time. However, the only known
analysis of multiserver SRPT is in the worst-case
adversarial setting, where SRPT can be far from
optimal. In this paper, we give the first stochastic
analysis bounding mean response time of the M/G/k under
SRPT. Using our response time bound, we show that
multiserver SRPT has asymptotically optimal mean
response time in the heavy-traffic limit. The key to
our bounds is a strategic combination of stochastic and
worst-case techniques. Beyond SRPT, we prove similar
response time bounds and optimality results for several
other multiserver scheduling policies. This article is
an introduction to our longer paper, [1].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2018:FLB,
author = "Xingyu Zhou and Jian Tan and Ness Shroff",
title = "Flexible Load Balancing with Multi-dimensional
State-space Collapse: Throughput and Heavy-traffic
Delay Optimality",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "10--11",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Heavy traffic delay analysis for load balancing
policies has relied heavily on a condition called
state-space collapse onto a single-dimensional line. In
this paper, via a Lyapunov drift-based method, we
rigorously prove that even under a multidimensional
state-space collapse, steady-state heavy-traffic delay
optimality can still be achieved for a general load
balancing system. This result directly implies that
achieving steady-state heavy-traffic delay optimality
simply requires that no server is kept idling while
others are busy at heavy loads, thus complementing and
extending the result obtained by diffusion
approximations. Further, we explore the greater
flexibility provided by allowing a multidimensional
state-space collapse in designing new load balancing
policies that are both throughput optimal and
heavy-traffic delay optimal in steady state.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chatzieleftheriou:2018:JUA,
author = "L. E. Chatzieleftheriou and G. Darzanos and M.
Karaliopoulos and I. Koutsopoulos",
title = "Joint User Association, Content Caching and
Recommendations in Wireless Edge Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "12--17",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308905",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we investigate the performance gains
that are achievable when jointly controlling (i) in
which Small-cell Base Stations (SBSs) mobile users are
associated to, (ii) which content items are stored at
SBS co-located caches and (iii) which content items are
recommended to the mobile users who are associated to
different SBSs. We first establish a framework for the
joint user association, content caching and
recommendations problem, by specifying a set of
necessary conditions for all three component functions
of the system. Then, we provide a concrete formulation
of the joint problem when the objective is to maximize
the total hit ratio over all caches. We analyze the
problems that emerge as special cases of the joint
problem, when one of the three functions is carried out
independently, and use them to characterize its
complexity. Finally, we propose a heuristic that
tackles the joint problem. Proof-of-concept simulations
demonstrate that even this simple heuristic outperforms
an optimal algorithm that takes only caching and
recommendation decisions into account and provide
evidence of the achievable performance gains when
decisions over all three functions are jointly
optimized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wong:2018:HSM,
author = "Yung Fei Wong and Lachlan L. H. Andrew and Y. Ahmet
Sekercioglu",
title = "Hidden semi-{Markov} models for electricity load
disaggregation",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "18--23",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper assesses the performance of a technique for
estimating the power consumption of individual devices
based on aggregate consumption. The new semi-Markov
technique outperforms pure hidden Markov models on the
REDD dataset. The technique also exploits information
from transients to eliminate a substantial fraction of
the observed errors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kakhki:2018:IMW,
author = "Arash Molavi Kakhki and Vijay Erramilli and Phillipa
Gill and Augustin Chaintreau and Balachander
Krishnamurthy",
title = "Information Market for {Web} Browsing: Design,
Usability and Incremental Adoption",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "24--24",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Browsing privacy solutions are faced with an uphill
battle to deployment. Many operate counter to the
economic objectives of popular online services (e.g.,
by completely blocking ads) and do not provide enough
incentive for users who may be subject to performance
degradation for deploying them. In this study, we take
a step towards realizing a system for online privacy
that is mutually beneficial to users and online
advertisers: an information market. This system not
only maintains economic viability for online services,
but provides users with financial compensation to
encourage them to participate. We prototype and
evaluate an information market that provides privacy
and revenue to users while preserving and sometimes
improving their Web performance. We evaluate
feasibility of the market via a one month field study
with 63 users and find that users are indeed willing to
sell their browsing information. We also use Web traces
of millions of users to drive a simulation study to
evaluate the system at scale. We find that the system
can indeed be profitable to both users and online
advertisers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2018:SEM,
author = "Nicolas Gast and Luca Bortolussi and Mirco
Tribastone",
title = "Size Expansions of Mean Field Approximation: Transient
and Steady-State Analysis",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "25--26",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308909",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mean field approximation is a powerful tool to study
the performance of large stochastic systems that is
known to be exact as the system's size N goes to
infinity. Recently, it has been shown that, when one
wants to compute an expected performance metric in
steady-state, mean field approximation can be made more
accurate by adding a term in 1/N to the original
approximation. This is called the refined mean field
approximation in [7]. In this paper, we show how to
obtain the same result for the transient regime and we
provide a further refinement by expanding the term in
$ 1 / N^2 $ (both for the transient and steady-state regimes). Our
derivations are inspired by moment-closure
approximation. We provide a number of examples that
show this new approximation is usable in practice for
systems with up to a few tens of dimensions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bermolen:2018:DGA,
author = "Paola Bermolen and Matthieu Jonckheere and Federico
Larroca and Manuel Saenz",
title = "Degree-Greedy Algorithms on Large Random Graphs",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "27--32",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computing the size of maximum independent sets is an
NP-hard problem for fixed graphs. Characterizing and
designing efficient algorithms to compute (or
approximate) this independence number for random graphs
are notoriously difficult and still largely open
issues. In this paper, we show that a low complexity
degree-greedy exploration is actually asymptotically
optimal on a large class of sparse random graphs.
Encouraged by this result, we present and study two
variants of sequential exploration algorithms: static
and dynamic degree-aware explorations. We derive
hydrodynamic limits for both of them, which in turn
allow us to compute the size of the resulting
independent set. Whereas the former is simpler to
compute, the latter may be used to arbitrarily
approximate the degree-greedy algorithm. Both can be
implemented in a distributed manner. The corresponding
hydrodynamic limits constitute an efficient method to
compute or bound the independence number for a large
class of sparse random graphs. As an application, we
then show how our method may be used to compute (or
approximate) the capacity of a large 802.11-based
wireless network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yajima:2018:CLT,
author = "M. Yajima and T. Phung-Duc",
title = "A central limit theorem for a {Markov}-modulated
infinite-server queue with batch {Poisson} arrivals and
binomial catastrophes",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "33--34",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers the stationary queue length
distribution of a Markov-modulated $ M^X / M / \infty $
queue with binomial catastrophes. When a binomial
catastrophe occurs, each customer is either removed
with a probability or is retained with the
complementary probability. We focus on our model under
a heavy traffic regime because its exact analysis is
difficult if not impossible. We establish a central
limit theorem for the stationary queue length of our
model in a heavy traffic regime. The central limit
theorem can be used to approximate the queue length
distribution of our model with large arrival rates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shi:2018:WFC,
author = "Lianjie Shi and Xin Wang and Richard T. B. Ma and Y.
C. Tay",
title = "Weighted Fair Caching: Occupancy-Centric Allocation
for Space-Shared Resources",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "35--36",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditional cache replacement policies such as LRU and
LFU were often designed with the focus on efficiency
and aimed at maximizing the hit rates. However, the
resource owners of modern computing systems such as
cloud infrastructures and content delivery networks
often have new objectives such as fairness and revenue
to be optimized rather than the overall hit rate. A
general resource management framework that allows
resource owners to determine various resource
allocations is desirable. Although such a mechanism
like Weighted Fair Queueing (WFQ) exists for
indivisible time-shared resources such as CPU and
network bandwidth, no such counterpart exists for
space-shared resources such as cache and main memory.
In this paper, we propose Weighted Fair Caching (WFC),
a capacity-driven cache policy that provides explicitly
tunable resource allocations for cache owners in terms
of the occupancy rates of contents. Through analysis of
the continuous-time Markov Chain model of cache
dynamics, we derive the closed-form occupancy rates as
a function of the weights of contents, and various
properties such as monotonicity and scaling of WFC. We
show that WFC can be used to provide fair sharing of
cache space among contents, as well as class-based
service differentiations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carlsson:2018:WCB,
author = "Niklas Carlsson and Derek Eager",
title = "Worst-case Bounds and Optimized Cache on {$M$}-th
Request Cache Insertion Policies under Elastic
Conditions",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "37--38",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This 2-page extended abstract provides an overview of
the key results presented in more detail in our full
length paper, with the same title, to appear in
Performance Evaluation [2].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ni:2018:WEW,
author = "Fan Ni and Xingbo Wu and Weijun Li and Lei Wang and
Song Jiang",
title = "{WOJ}: Enabling Write-Once Full-data Journaling in
{SSDs} by Using Weak-Hashing-based Deduplication",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "39--40",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Journaling is a commonly used technique to ensure data
consistency in file systems, such as ext3 and ext4.
With journaling technique, file system updates are
first recorded in a journal (in the commit phase) and
later applied to their home locations in the file
system (in the checkpoint phase). Based on the contents
recorded in the journal, file system can be either in
data or metadata journaling mode. With data journaling
mode enabled, all file system (data and metadata)
updates are written to the journal before being written
to the files later on. In contrast, with metadata
journaling mode, only updated metadata are written to
and protected by the journal, while data are written
directly to their home locations in the files. File
system users are usually reluctant to use the data
journaling mode as every modification (data and
metadata) to the file system is written twice, and
instead resort to metadata journaling for its fast
speed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Durand:2018:EBR,
author = "Stephane Durand and Federica Garin and Bruno Gaujal",
title = "Efficiency of Best Response Dynamics with High Playing
Rates in Potential Games",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "41--42",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we design and analyze distributed best
response dynamics to compute Nash equilibria in
potential games. This algorithm uses local Poisson
clocks for each player and does not rely on the usual
but unrealistic assumption that players take no time to
compute their best response. If this time (denoted $
\delta $) is taken into account, distributed best
response dynamics (BRD) may suffer from overlaps: one
player starts to play while another player has not
changed its strategy yet. An overlap may lead to a
decrease of the potential but we can show that they do
not jeopardize eventual convergence to a Nash
equilibrium. Our main result is to use a Markovian
approach to show that the average execution time of the
algorithm E[T_{\rm BRD}] can be bounded: $ 2 \delta n
\log n / \log \log n + O(n) \leq E[T_{\rm BRD}] \leq 4
e^\gamma \delta n \log n / \log \log n + O(n)$, where $
\gamma $ is the Euler constant, $n$ is the number of
players and $ \delta $ is the time taken by one player
to compute its best response. These bounds are obtained
by using an asymptotically optimal playing rate $
\lambda $. Our analytic bound shows that $ 2 \delta
\lambda = \log \log n - \log C$, where $C$ is a
constant. This induces a large probability of overlap $
(p = 1 - C / \log^{1 / 2} n)$. In practice, numerical
simulations also show that using high playing rates is
efficient, with an optimal probability of overlap $
p_{\rm opt} \approx 0.78 $, for $n$ up to 250. This shows that best
response dynamics are unexpectedly efficient to compute
Nash equilibria, even in a distributed setting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chouayakh:2018:AML,
author = "Ayman Chouayakh and Aurelien Bechler and Isabel Amigo
and Loutfi Nuaymi and Patrick Maill{\'e}",
title = "Auction mechanisms for Licensed Shared Access: reserve
prices and revenue-fairness trade offs",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "43--48",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308918",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Licensed shared access (LSA) is a new approach that
allows Mobile Network Operators to use a portion of the
spectrum initially licensed to another incumbent user,
by obtaining a license from the regulator via an
auction mechanism. In this context, different truthful
auction mechanisms have been proposed, and differ in
terms of allocation (who gets the spectrum) but also on
revenue. Since those mechanisms could generate an
extremely low revenue, we extend them by introducing a
reserve price per bidder which represents the minimum
amount that each winning bidder should pay. Since this
may be at the expense of the allocation fairness, for
each mechanism we find by simulation the reserve price
that optimizes a trade-off between expected fairness
and expected revenue. Also, for each mechanism, we
analytically express the expected revenue when
valuations of operators for the spectrum are
independent and identically distributed according to a
uniform distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zou:2018:TEM,
author = "Mao Zou and Richard T. B. Ma and Yinlong Xu",
title = "Towards An Efficient Market Mediator for Divisible
Resources",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "49--50",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308919",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Auction-based markets of divisible resources have
proliferated over recent years. One fundamental problem
facing every market mediator is how to achieve market
efficiency for optimal social welfare, especially when
a limited number of agents forms a monopolistic or
oligopolistic market, because each agent's selfish
strategic behavior may lead to serious degradation in
efficiency. In general, it is difficult for a market
mediator to achieve efficiency since agents'
preferences are hidden information that they are
unwilling to reveal due to security and privacy
concerns. In this paper, we consider a market of
divisible resource consisting of agents on both sides
of demand and supply. We design an adaptive auction
framework for a market mediator to achieve efficient
resource allocation and acquisition. Our novel design
generalizes demand/supply function bidding mechanisms
by introducing price differentiation via tunable
parameters. We design algorithms that enable the
mediator and agents to jointly run the market in an
adaptive fashion: the mediator sends market signals to
agents; each agent submits her bid based on the signals
in a distributed manner; the mediator adjusts tunable
parameters based on bids and updates market signals. We
also design an adaptive algorithm to dynamically
determine the optimal amount of resource that needs to
be transacted so as to maximize social welfare, if not
known a priori. By utilizing our market mechanisms, the
market mediator will be able to reach an efficient
market outcome under Nash equilibrium.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thiran:2018:LSD,
author = "Patrick Thiran",
title = "Locating the Source of Diffusion in Large-scale and
Random Networks.",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "51--51",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308921",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We survey some results on the localization of the
source of diffusion in a network. There have been
significant efforts in studying the dynamics of
epidemic propagations on networks, and more
particularly on the forward problem of epidemics:
understanding the diffusion process and its dependence
on the infecting and curing rates. We address here the
inverse problem of inferring the original source of
diffusion. If we could observe the entire diffusion
process and collect the times at which nodes of the
network get infected, identifying its source would be
easy. Unfortunately, due to the costs of information
collection and to overhead constraints, the data
available for source localization is usually very
sparse, first because the information that can be provided
by a node is limited, and second because the number of
nodes in the network is often prohibitively large, and
only some of them, which we call hereafter sensors,
might be able or willing to provide any information
about their state.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zocca:2018:TSM,
author = "Alessandro Zocca",
title = "Temporal starvation in multi-channel {CSMA} networks:
an analytical framework",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "52--53",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308923",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we consider a stochastic model for a
frequency-agile CSMA protocol for wireless networks
where multiple orthogonal frequency channels are
available. Even when the possible interference on the
different channels is described by different conflict
graphs, we show that the network dynamics can be
equivalently described as that of a single-channel CSMA
algorithm on an appropriate virtual network. Our focus
is on the asymptotic regime in which the network nodes
try to activate aggressively in order to achieve
maximum throughput. Of particular interest is the
scenario where the number of available channels is not
sufficient for all nodes of the network to be
simultaneously active and the well-studied temporal
starvation issues of the single-channel CSMA dynamics
persist. For most networks we expect that a larger
number of available channels should alleviate these
temporal starvation issues. However, we prove that the
aggregate throughput is a non-increasing function of
the number of available channels. To investigate this
trade-off that emerges between aggregate throughput and
temporal starvation phenomena, we propose an analytical
framework to study the transient dynamics of
multi-channel CSMA networks by means of first hitting
times. Our analysis further reveals that the mixing
time of the activity process does not always correctly
characterize the temporal starvation in the
multi-channel scenario and often leads to pessimistic
performance estimates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vasantam:2018:MFB,
author = "Thirupathaiah Vasantam and Arpan Mukhopadhyay and Ravi
R. Mazumdar",
title = "The Mean-field Behavior of Processor Sharing Systems
with General Job Lengths Under the {SQ(d)} Policy",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "54--55",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308924",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we derive the mean-field behavior of
empirical distributions of large systems that consist
of N (large) identical parallel processor sharing
servers with a Poisson arrival process having intensity $
N \lambda $ and generally distributed job lengths under
the randomized SQ(d) load balancing policy. Under this
policy, an arrival is routed to the server with the
least number of progressing jobs among d randomly
chosen servers. The mean-field is then used to
approximate the statistical properties of the system.
In particular, we show that in the limit as N grows,
individual servers are statistically independent of
others (propagation of chaos) and more importantly, the
equilibrium point of the mean-field is insensitive to
the job length distributions. This has important
engineering relevance for the robustness of such
routing policies that are often used in web server
farms. We use a measure-valued process approach and
martingale techniques to obtain our results. We also
provide numerical results to support our analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
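A minimal sketch of the SQ(d) dispatch rule mentioned in the abstract above; the processor-sharing service and the mean-field limit themselves are not modelled here, and the function and parameter names are illustrative assumptions.

import random

def sq_d_dispatch(occupancies, d, rng=random):
    # sample d distinct servers uniformly at random and join the one
    # with the fewest jobs in progress, breaking ties at random
    candidates = rng.sample(range(len(occupancies)), d)
    least = min(occupancies[i] for i in candidates)
    chosen = rng.choice([i for i in candidates
                         if occupancies[i] == least])
    occupancies[chosen] += 1
    return chosen

servers = [0] * 100
for _ in range(500):
    sq_d_dispatch(servers, d=2)
print("max occupancy:", max(servers))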
@Article{Nakahira:2018:MVDb,
author = "Yorie Nakahira and Andres Ferragut and Adam Wierman",
title = "Minimal-Variance Distributed Deadline Scheduling in a
Stationary Environment",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "56--61",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308925",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many modern schedulers can dynamically adjust their
service capacity to match the incoming workload. At the
same time, however, variability in service capacity
often incurs operational and infrastructure costs. In
this paper, we propose distributed algorithms that
minimize service capacity variability when scheduling
jobs with deadlines. Specifically, we show that Exact
Scheduling minimizes service capacity variance subject
to strict demand and deadline requirements under
stationary Poisson arrivals. We also characterize the
optimal distributed policies for more general settings
with soft demand requirements, soft deadline
requirements, or both. Additionally, we show how close
the performance of the optimal distributed policy is to
that of the optimal centralized policy by deriving a
competitive-ratio-like bound.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
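A small sketch of one natural reading of the Exact Scheduling policy named in the abstract: each admitted job is served at the constant rate demand/deadline, so it finishes exactly at its deadline, and the instantaneous service capacity is the sum of these per-job rates. This reading and the example numbers are assumptions for illustration, not the paper's formal definition.

def exact_scheduling_capacity(jobs, now):
    # jobs: (arrival time, demand, relative deadline); a job active at
    # time `now` is served at the constant rate demand / deadline.
    capacity = 0.0
    for arrival, demand, deadline in jobs:
        if arrival <= now < arrival + deadline:
            capacity += demand / deadline
    return capacity

jobs = [(0.0, 4.0, 8.0), (1.0, 2.0, 2.0), (3.0, 6.0, 3.0)]
for t in (0.5, 1.5, 3.5, 6.5):
    print(f"t={t}: capacity={exact_scheduling_capacity(jobs, t):.2f}")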
@Article{Zeballos:2018:AFE,
author = "Martin Zeballos and Andres Ferragut and Fernando
Paganini",
title = "Achieving fairness for {EV} charging in overload: a
fluid approach",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "62--67",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308927",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the emergence of Electrical Vehicles (EVs), there
is a growing investment in power infrastructure to
provide charging stations. In an EV parking lot,
typically not all vehicles can be charged
simultaneously, and thus some scheduling must be
performed, taking into account the time the users are
willing to spend in the system. In this paper, we
analyze the performance of several common scheduling
policies through a fluid model. We show that in
overload, the amount of unfinished work is the same for
all policies, but these can distribute the work
performed unfairly across users. We also introduce a
new policy called Least Laxity Ratio that achieves a
suitable notion of fairness across jobs, and validate
its performance by simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hargreaves:2018:FOS,
author = "Eduardo Hargreaves and Claudio Agosti and Daniel
Menasche and Giovanni Neglia and Alexandre
Reiffers-Masson and Eitan Altman",
title = "Fairness in Online Social Network Timelines:
Measurements, Models and Mechanism Design",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "68--69",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308928",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "The Facebook News Feed personalization algorithm has a
significant impact, on a daily basis, on the lifestyle,
mood and opinion of millions of Internet users.
Nonetheless, the behavior of this algorithm lacks
transparency, motivating measurements, modeling and
analysis in order to understand and improve its
properties. In this paper, we propose a reproducible
methodology encompassing measurements, an analytical
model and a fairness-based News Feed design. The model
leverages the versatility and analytical tractability
of time-to-live (TTL) counters to capture the
visibility and occupancy of publishers over a News
Feed. Measurements are used to parameterize and to
validate the expressive power of the proposed model.
Then, we conduct a what-if analysis to assess the
visibility and occupancy bias incurred by users against
a baseline derived from the model. Our results indicate
that a significant bias exists and it is more prominent
at the top position of the News Feed. In addition, we
find that the bias is non-negligible even for users
that are deliberately set as neutral with respect to
their political views, motivating the proposal of a
novel and more transparent fairness-based News Feed
design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
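A hedged illustration of why TTL counters are analytically convenient for this kind of model (a generic toy example, not the authors' model): if a publisher's posts reach a News Feed according to a Poisson process of rate lam and each post stays visible for a fixed TTL of ttl time units, the steady-state probability that at least one post is visible is 1 - exp(-lam * ttl), which the snippet checks against a simulation.

import math, random

def simulated_occupancy(lam, ttl, horizon=10000.0, seed=0):
    # fraction of time at least one post is still within its TTL
    rng = random.Random(seed)
    t, covered_until, busy = 0.0, 0.0, 0.0
    while True:
        t += rng.expovariate(lam)          # next post by the publisher
        if t >= horizon:
            break
        end = min(t + ttl, horizon)
        if end > covered_until:
            busy += end - max(t, covered_until)
            covered_until = end
    return busy / horizon

lam, ttl = 0.2, 3.0
print("simulated:", round(simulated_occupancy(lam, ttl), 3),
      "analytical:", round(1 - math.exp(-lam * ttl), 3))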
@Article{Floquet:2018:HBR,
author = "Julien Floquet and Richard Combes and Zwi Altman",
title = "Hierarchical Beamforming: Resource Allocation,
Fairness and Flow Level Performance",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "70--71",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308929",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider hierarchical beamforming in wireless
networks. For a given population of flows, we propose
computationally efficient algorithms for fair rate
allocation including proportional fairness and max-min
fairness. We further propose closed-form formulas for
flow level performance, for both elastic and streaming
traffic (1).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raaijmakers:2018:DPP,
author = "Y. Raaijmakers and S. C. Borst and O. J. Boxma",
title = "Delta probing policies for redundancy",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "72--73",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308931",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider job dispatching in systems with $N$
parallel servers, where jobs arrive according to a
Poisson process of rate $ \lambda $. In redundancy-$d$
policies, replicas of an arriving job are assigned to $
d \leq N$ servers selected uniformly at random (without
replacement) with the objective of reducing the delay. We
introduce a quite general workload model, in which job
sizes have some probability distribution while the
speeds (slowdown factors) of the various servers for a
given job are allowed to be inter-dependent and
non-identically distributed. This allows not only for
inherent speed differences among different servers, but
also for affinity relations. We further propose two
novel redundancy policies, so-called delta-probe-$d$
policies, where $d$ probes of a fixed, small, size $
\Delta $ are created for each incoming job, and
assigned to $d$ servers selected uniformly at random.
As soon as the first of these $d$ probe tasks finishes,
the actual job is assigned for execution, with the same
speed, to the corresponding server, and the other
probe tasks are abandoned. We also consider a
delta-probe-$d$ policy in which the probes receive
preemptive-resume priority over regular jobs. The aim
of these policies is to retain the benefits of
redundancy-$d$ policies while accounting for systematic
speed differences and mitigating the risks of running
replicas of the full job simultaneously for long
periods of time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
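A minimal sketch of the delta-probe-d idea described above, ignoring queueing delays for clarity (an assumption made only to keep the example short): d servers are sampled uniformly at random, each receives a probe of fixed size delta, and the server whose probe finishes first, i.e. the fastest server for this particular job, receives the actual job at that same speed.

import random

def delta_probe_assign(slowdowns, d, delta, rng=random):
    # slowdowns[i]: slowdown factor of server i for this specific job;
    # a probe of size delta on server i takes delta * slowdowns[i]
    probed = rng.sample(range(len(slowdowns)), d)
    winner = min(probed, key=lambda i: delta * slowdowns[i])
    return winner, delta * slowdowns[winner]

per_server_slowdown = [random.uniform(1.0, 3.0) for _ in range(10)]
server, probe_time = delta_probe_assign(per_server_slowdown,
                                        d=3, delta=0.05)
print(f"job goes to server {server}, probe took {probe_time:.3f}")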
@Article{Hellemans:2018:ARD,
author =       "T. Hellemans and B. {Van Houdt}",
title = "Analysis of Redundancy(d) with Identical Replicas",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "74--79",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308932",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing systems with redundancy have received
considerable attention recently. The idea of redundancy
is to reduce latency by replicating each incoming job a
number of times and to assign these replicas to a set
of randomly selected servers. As soon as one replica
completes service the remaining replicas are cancelled.
Most prior work on queueing systems with redundancy
assumes that the job durations of the different
replicas are i.i.d., which yields insights that can be
misleading for computer system design. In this paper we
develop a differential equation, using the cavity
method, to assess the workload and response time
distribution in a large homogeneous system with
redundancy without the need to rely on this
independence assumption. More specifically, we assume
that the duration of each replica of a single job is
identical across the servers and follows a general
service time distribution. Simulation results suggest
that the differential equation yields exact results as
the system size tends to infinity and can be used to
study the stability of the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ayesta:2018:UPF,
author = "Urtzi Ayesta and Tejas Bodas and Ina Maria Verloop",
title = "On a unifying product form framework for redundancy
models",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "80--81",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308933",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Using redundancy to minimize latency in parallel
server systems has become very popular in recent years.
While there are several variants of a redundancy-based
system, the general notion of redundancy is to create
multiple copies of the same job that will be sent to a
subset of servers. By allowing for redundant copies,
the aim is to minimize the system latency by exploiting
the variability in the queue lengths of the different
queues. Several recent works have both empirically and
theoretically shown that redundancy can help in
reducing the response time of a system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rosenberg:2018:HTB,
author = "Catherine Rosenberg",
title = "Highlight Talk on Battery Modeling: Trade-offs between
Accuracy and Complexity",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "82--83",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308935",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We are addressing in this Highlight Talk the general
problem of finding a model with the right level of
complexity and accuracy for a given use case. Focusing
on energy systems and more precisely on how such
systems would benefit from energy storage, we discuss
what makes a good battery model, propose a suite of
battery models ranging from extremely simple to complex,
and compare their pros and cons on two use cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ardakanian:2018:LSD,
author = "Omid Ardakanian",
title = "Leveraging Sparsity in Distribution Grids: System
Identification and Harmonic State Estimation",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "84--85",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308936",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Power distribution grids are sparse networks. The
admittance matrix of a (radial or non-radial) power
distribution grid is sparse, safety-critical events are
relatively sparse at any given time compared with the
number of nodes, and loads that produce significant
harmonics at a specific order are also sparse. In this
highlight talk, we define different types of sparsity
in unbalanced three-phase power distribution systems,
and explain how sparsity can be leveraged to address
three increasingly important problems:",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Danner:2018:SEP,
author = "Dominik Danner and Hermann de Meer",
title = "State Estimation in the Power Distribution System",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "86--88",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308937",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "In the power distribution domain, software that
estimates the grid state from several measurement
values has rarely been used in the low-voltage grid.
Such software tools are based on adaptive state
estimation methods, and their accuracy highly depends
on the available input data. Especially in the
low-voltage grid, which is mostly not monitored at
all, the increasing number of controllable high-power
loads, such as electric vehicle charging stations or
decentralized photovoltaics and battery storage
systems, directs the focus to the actual grid state,
in particular with regard to power quality.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vinot:2018:CAL,
author = "Benoit Vinot and Florent Cadoux and Nicolas Gast",
title = "Congestion Avoidance in Low-Voltage Networks by using
the Advanced Metering Infrastructure",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "89--91",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308938",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Large-scale decentralized photovoltaic (PV) generators
are currently being installed in many low-voltage
distribution networks. Without grid reinforcements or
production curtailment, they might create current
and/or voltage issues. In this paper, we consider the
use of the advanced metering infrastructure (AMI) as the
basis for PV generation control. We show that the
advanced metering infrastructure may be used to infer
some knowledge about the underlying network, and we
show how this knowledge can be used by a simple
feed-forward controller to curtail the solar production
efficiently. By means of numerical simulations, we
compare our proposed controller with two other
controller structures: open-loop, and feed-back P(U)
and Q(U). We demonstrate that our feed-forward
controller --- that requires no prior knowledge of the
underlying electrical network --- brings significant
performance improvements as it can effectively suppress
over-voltage and over-current while requiring low
energy curtailment. This method can be implemented at
low cost and requires no specific information about the
network on which it is deployed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tang:2018:PDG,
author = "Yujie Tang and Emiliano Dall'Anese and Andrey
Bernstein and S. H. Low",
title = "A Primal-Dual Gradient Method for Time-Varying
Optimization with Application to Power Systems",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "92--92",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308939",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider time-varying nonconvex optimization
problems where the objective function and the feasible
set vary over discrete time. This sequence of
optimization problems induces a trajectory of
Karush--Kuhn--Tucker (KKT) points. We present a class
of regularized primal-dual gradient algorithms that
track the KKT trajectory. These algorithms are
feedback-based algorithms, where analytical models for
system state or constraints are replaced with actual
measurements. We present conditions for the proposed
algorithms to achieve bounded tracking error when the
cost and constraint functions are twice continuously
differentiable. We discuss their practical implications
and illustrate their applications in power systems
through numerical simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
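A hedged sketch of the generic regularized primal-dual gradient idea the abstract refers to, applied to a toy time-varying problem (minimize (x - a_t)^2 subject to x <= b_t); the specific problem, step size and regularization constant are assumptions for illustration, not the authors' algorithm or its power-system application.

import math

def primal_dual_track(steps=200, alpha=0.2, eps=0.1):
    x, lam = 0.0, 0.0
    history = []
    for k in range(steps):
        t = 0.05 * k
        a, b = math.sin(t), 0.5 * math.cos(t)   # time-varying data
        # regularized Lagrangian:
        #   L(x, lam) = (x - a)^2 + lam * (x - b) - (eps / 2) * lam^2
        grad_x = 2.0 * (x - a) + lam
        grad_lam = (x - b) - eps * lam
        x = x - alpha * grad_x                  # primal descent step
        lam = max(0.0, lam + alpha * grad_lam)  # projected dual ascent
        history.append((t, x, min(a, b)))       # optimum is min(a_t, b_t)
    return history

for t, x, x_star in primal_dual_track()[::50]:
    print(f"t={t:.2f}  tracked x={x:.3f}  optimum={x_star:.3f}")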
@Article{Alagha:2018:SAI,
author = "Nader Alagha",
title = "Satellite Air Interface Evolutions in the {$5$G} and
{IoT} Era",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "93--95",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308941",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An overview of satellite air interface evolution, in
light of new initiatives to consider satellite networks
as an integrated part of the 5th generation of
communication networks, is presented. The study of
non-terrestrial networks has identified a variety of
satellite communication services. Despite that, the
adoption of air interface solutions over satellite
links faces several challenges that require in-depth
analyses and possible iterations in design trade-offs.
This has created new incentives for several on-going or
planned R\&D projects in support of new
standardization. While some preliminary observations
can be made based on recent studies, further analyses
are required to reach a consolidated view.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Palattella:2018:AMT,
author = "Maria Rita Palattella and Ridha Soua and Andr{\'e}
Stemper and Thomas Engel",
title = "Aggregation of {MQTT} Topics over Integrated
Satellite-Terrestrial Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "96--97",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308942",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The MQTT application protocol was originally designed
for monitoring an oil pipeline through the desert by
collecting sensor data via satellite link. Thus, by
design MQTT is very suitable for data collection over
integrated satellite-terrestrial networks. Leveraging
the MQTT Bridge functionality, in this work we
propose a novel architecture with two MQTT Brokers
located at the satellite terminal and the satellite
gateway. By using the topic pattern option, supported
by the bridge, a subscriber can request several topics
within a single request. To reduce the amount of
traffic exchanged over the satellite return channel, we
introduce in the architecture a new entity, namely MQTT
message aggregation filter, which aggregates all the
MQTT topics matching the topic pattern in the same
response.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luong:2018:SHV,
author = "Doanh Kim Luong and Muhammad Ali and Fouad Benamrane
and Ibrahim Ammar and Yim-Fun Hu",
title = "Seamless handover for video streaming over an
{SDN-based} Aeronautical Communications Network",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "98--99",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308943",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "There has been increasing interest in applying
Software Defined Networking (SDN) to aeronautical
communications primarily for air traffic management
purposes. From the point of view of passenger
communication services, a major goal is to improve passengers'
perception of quality of experience on the infotainment
services being provided for them. Due to the high speed
of aircraft and the use of multiple radio technologies
during different flight phases and across different
areas, vertical handovers between these different radio
technologies are envisaged. This poses a challenge to
maintain the quality of service during such handovers,
especially for high bandwidth applications such as
video streaming. This paper proposes an SDN-based
aeronautical communications architecture consisting of
both satellite and terrestrial-based radio technology.
In addition, an experimental implementation of the
Locator/ID Separation Protocol (LISP) with
built-in multi-homing capability over the SDN-based
architecture was proposed to handle vertical handovers
between the satellite and other radio technologies
onboard the aircraft. By using both objective and
subjective Quality of Experience (QoE) metrics, the
simulation experiments show the benefit of combining
LISP with SDN to improve the video streaming quality
during the handover in the aeronautical communication
environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bas:2018:IMS,
author = "J. Bas and M. Caus and A. Perez and R. Soleymani and
N. A. K. Beigi",
title = "Interference Management Schemes for High Spectral
Efficiency Satellite Communications",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "100--103",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308944",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "This paper presents a short review of the main
high-spectral-efficiency systems for multibeam satellite
communications. In this regard, we study the use of
Multi-User Detectors (MUD) and Successive Interference
Cancellation (SIC) applied to aggressive frequency
reuse, frequency packing and Non Orthogonal Multiple
Access (NOMA). Furthermore, we have also considered the
presence of co-channel interference and spectrum
limitations. The experimental validations have been
conducted using the DVB-S2 waveform. The results point
out that the residual co-channel interference reduces
the benefits of using frequency packing schemes. Moreover,
we investigate the effect of asynchronous reception of
data streams in our previously proposed interference
management scheme based on cooperative NOMA in
multibeam satellite systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Medina-Caballero:2018:LQO,
author = "Julio A. Medina-Caballero and M. Angeles
Vazquez-Castro",
title = "Link Quality Optimization for Hybrid {LEO--GSO}
Systems",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "104--107",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308945",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, an optimised link design for LEO-GSO
satellite systems is proposed so that a variable number
of LEO satellites can maintain a required link quality
when traversing GSO coverage. We identify the orbit,
system and data traffic parameters that are relevant,
with which we obtain a parameterised optimal
transmission energy function. Our results are useful
for the design of energy-efficient hybrid satellite
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Asuquo:2018:SEC,
author = "P. Asuquo and H. Cruickshank and C. P. A. Ogah",
title = "Securing Emergency Communications for the
Next-Generation Networks: Performance Analysis of
Distributed Trust Computing for Future Emergency
Communications",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "108--111",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308946",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Delay Tolerant Network (DTN) provides connectivity
where there is uncertainty in end-to-end connectivity.
In DTN, nodes exchange buffered messages upon an
encounter. In disaster operations where the
telecommunication and power infrastructures are
completely broken down or destroyed, DTN can be used to
support emergency communication till these
infrastructures are restored. Security in DTN remains a
major challenge because of its network characteristics,
such as frequent disruptions, dynamic topology, and
limited, constrained resources. One of the major threats in
DTN is Denial of Service (DoS) attacks. This attack
mainly comes from intermediary nodes that drop or flood
packets in the network, which often results in the
degradation of network performance. In this paper,
we propose a collaborative content-aware trust
forwarding for emergency communication networks.
Extensive simulations and validations show that the
proposed schemes outperform existing routing and trust
management protocols in the presence of malicious nodes
and are resilient to trust related attacks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kuhn:2018:RTS,
author = "Nicolas Kuhn",
title = "Research trends in {SATCOM}",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "112--112",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308947",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "State-of-the-art multi-gateway ground segments follow
the architectural trends in cellular networks (C-RAN
approaches) and use the same kind of connectivity. That
being said, the specificity of the link characteristics
makes the use of TCP proxies essential for both good
radio resource exploitation and good end-users' quality
of service. Trends in transport may be relevant for
SATCOM networks, but specific optimizations may still
be necessary. In this talk we present the performance of
TCP BBR, QUIC and MPTCP over real SATCOM Internet
access to assess the need for further optimization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vanMoorsel:2018:BMB,
author = "Aad van Moorsel",
title = "Benchmarks and Models for Blockchain: Consensus
Algorithms",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "113--113",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308949",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this presentation we consider blockchain from a
performance engineering perspective, with an emphasis
on consensus algorithms. A set of examples of
performance characteristics and challenges of public
blockchains serves as introduction to the presentation.
These examples motivate a list of main topics that
require further analysis by the research community, in
both public and private blockchain variants. This list
considers performance engineering challenges across the
different layers of blockchain systems, which we
identify as system, connector, and incentives layers,
respectively. We go into some more depth regarding the
evaluation of consensus algorithms, such as Proof of
Work, which are a core element of the connector layer.
In the presentation we will advocate probabilistic
verification as a key approach to evaluate different
consensus algorithms with respect to practically
meaningful metrics. Throughout the talk, we present
snippets of our recent research results in the area of
modelling and benchmarking blockchain systems [1, 2, 3,
4, 5, 6].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellemans:2018:MCM,
author = "Tim Hellemans and Benny {Van Houdt} and Daniel S.
Menasche and Mandar Datar and Swapnil Dhamal and
Corinne Touati",
title = "Mining competition in a multi-cryptocurrency ecosystem
at the network edge: a congestion game approach",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "114--117",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308950",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We model the competition over several blockchains
characterizing multiple cryptocurrencies as a
non-cooperative game. Then, we specialize our results
to two instances of the general game, showing
properties of the Nash equilibrium. In particular,
leveraging results about congestion games, we establish
the existence of pure Nash equilibria and provide
efficient algorithms for finding such equilibria.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zander:2018:DSD,
author = "Manuel Zander and Tom Waite and Dominik Harz",
title = "{DAGsim}: Simulation of {DAG}-based distributed ledger
protocols",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "118--121",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308951",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalability of distributed ledgers is a key adoption
factor. As an alternative to blockchain-based
protocols, directed acyclic graph (DAG) protocols are
proposed with the intention to allow a higher volume of
transactions to be processed. However, there is still
limited understanding of the behaviour and security
considerations of DAG-based systems. We present an
asynchronous, continuous time, and multi-agent
simulation framework for DAG-based cryptocurrencies. We
model honest and semi-honest actors in the system to
analyse the behaviour of one specific cryptocurrency,
IOTA. Our simulations show that the agents that have
low latency and a high connection degree have a higher
probability of having their transactions accepted in
the network with honest and semi-honest strategies.
Last, the simulator is built with extensibility in
mind. We are in the process of implementing SPECTRE as
well as including malicious agents.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ricci:2018:LBD,
author = "Saulo Ricci and Eduardo Ferreira and Daniel Sadoc
Menasche and Artur Ziviani and Jose Eduardo Souza and
Alex Borges Vieira",
title = "Learning Blockchain Delays: a Queueing Theory
Approach",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "122--125",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308952",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite the growing interest in cryptocurrencies, the
delays incurred to confirm transactions are one of the
factors that hamper the wide adoption of systems such
as Bitcoin. Bitcoin transactions are usually confirmed
within short periods (minutes), but these are still
much longer than in conventional credit card systems
(seconds). In this
work, we propose a framework encompassing machine
learning and a queueing theory model to (i) identify
which transactions will be confirmed; and (ii)
characterize the confirmation time of confirmed
transactions. The proposed queueing theory model
accounts for factors such as the activity time of
blocks and the mean time between transactions. We
parameterize the model for transactions that are
confirmed within minutes, suggesting that its
integration into a more general framework is a step
towards building scalability to Bitcoin.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grunspan:2018:PBW,
author = "C. Grunspan and R. Perez-Marco",
title = "On profitability of block withholding strategies",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "126--126",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308953",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "We present recent developments in the understanding of
the profitability of block withholding strategies in
Bitcoin mining and other Proof-of-Work based
blockchains. Block withholding strategies (like
selfish, stubborn, trailing or catch-up mining) are
rogue mining strategies that violate the rules of the
Bitcoin protocol. The authors recently found the exact
model based on iterative games to evaluate the
profitability per unit time. With a novel application
of martingale techniques and Doob's Stopping Time
Theorem, we compute their profitability in closed form.
We can then compare these strategies and honest mining
in parameter space, and decide which one is more
profitable.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bruschi:2018:MIS,
author = "Francesco Bruschi and Vincenzo Rana and Lorenzo
Gentile and Donatella Sciuto",
title = "Mine with it or sell it: the superhashing power
dilemma",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "127--130",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308954",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource =    "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "In
proof of work blockchain systems, there are strong
incentives towards designing hardware that can mine
faster and/or with less power consumption. There are
two ways of taking advantage of such devices: one can
use them to mine more coins with less power, or sell
them to other miners. The two strategies are not
independent, of course: if everybody has the boosting
technology, the difficulty will rise, and it won't be
an advantage anymore. On the other hand, if the boost
is above a certain threshold, being used only by a
small subset of miners might mean centralizing the
system, with potentially dangerous consequences on the
platform credibility. In this paper we analyse the
impact of different strategies to exploit a significant
increase in mining hardware efficiency. To do so, we
developed a multi-agent based simulator, that mimics
the relevant mechanics of the mining ecosystem, as well
as some features of the miners as economic actors. We
then characterized different significant
sell-it-or-mine-with-it strategies, and observed the
simulated outcome.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smuts:2018:WDC,
author = "Nico Smuts",
title = "What Drives Cryptocurrency Prices?: an Investigation
of {Google Trends} and {Telegram} Sentiment",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "131--134",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308955",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Google Trends$^1$ search analysis service and the
Telegram$^2$ messaging platform are investigated to
determine their respective relationships to
cryptocurrency price behaviour. It is shown that, in
contrast to earlier findings, the relationship between
cryptocurrency price movements and internet search
volumes obtained from Google Trends is no longer
consistently positive, with strong negative
correlations detected for Bitcoin and Ethereum during
June 2018. Sentiment extracted from cryptocurrency
investment groups on Telegram is found to be positively
correlated to Bitcoin and Ethereum price movements,
particularly during periods of elevated volatility. The
number of messages posted on a Bitcoin-themed Telegram
group is found to be an indicator of Bitcoin price
action in the subsequent week. A long short-term memory
(LSTM) recurrent neural network is developed to predict
the direction of cryptocurrency prices using data
obtained from Google Trends and Telegram. It is shown
that Telegram data is a better predictor of the
direction of the Bitcoin market than Google Trends. The
converse is true for Ethereum. The LSTM model produces
the most accurate results when predicting price
movements over a one-week period.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alharby:2018:BSF,
author = "Maher Alharby and Aad van Moorsel",
title = "{BlockSim}: a Simulation Framework for Blockchain
Systems",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "135--138",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308956",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "In both the design and deployment of blockchains, many
configuration choices need to be made. Investigating
different implementation and design choices is not
feasible or practical on real systems. Therefore, we
propose BlockSim as a framework to build discrete-event
dynamic system models for blockchain systems. BlockSim
is organized in three layers: incentive layer,
connector layer and system layer and is implemented in
Python. This paper introduces BlockSim, with a
particular emphasis on the modeling and simulation of
block creation through the Proof of Work consensus
algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
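A tiny sketch, illustrative only and not BlockSim itself, of the usual way Proof of Work block creation is modelled in a discrete-event simulator: each miner finds the next block after an exponentially distributed time whose rate is proportional to its share of the total hash power, so a miner wins a block with probability equal to that share. All names and numbers are assumptions for the example.

import random

def simulate_pow(hash_shares, n_blocks=1000, mean_interval=600.0, seed=1):
    rng = random.Random(seed)
    wins = [0] * len(hash_shares)
    total_time = 0.0
    for _ in range(n_blocks):
        # time until each miner would find the next block
        times = [rng.expovariate(share / mean_interval)
                 for share in hash_shares]
        winner = min(range(len(times)), key=times.__getitem__)
        total_time += times[winner]
        wins[winner] += 1
    return total_time, wins

total_time, wins = simulate_pow([0.5, 0.3, 0.2])
print("blocks per miner:", wins,
      "avg block interval:", round(total_time / sum(wins), 1))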
@Article{Fedchenko:2018:FNN,
author = "Vladyslav Fedchenko and Giovanni Neglia and Bruno
Ribeiro",
title = "Feedforward Neural Networks for Caching: Enough or Too
Much?",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "139--142",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308958",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a caching policy that uses a feedforward
neural network (FNN) to predict content popularity. Our
scheme outperforms popular eviction policies like LRU
or ARC, but also a new policy relying on more complex
recurrent neural networks. At the same time,
replacing the FNN predictor with a naive linear
estimator does not degrade caching performance
significantly, questioning then the role of neural
networks for these applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
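For reference, a minimal sketch of LRU, one of the baseline eviction policies the abstract compares against (the FNN-based policy itself is not reproduced here): a hit refreshes the item's recency; a miss inserts the item and, once capacity is reached, evicts the least recently used one.

from collections import OrderedDict

class LRUCache:
    def __init__(self, capacity):
        self.capacity = capacity
        self.items = OrderedDict()

    def request(self, key):
        hit = key in self.items
        if hit:
            self.items.move_to_end(key)          # refresh recency
        else:
            if len(self.items) >= self.capacity:
                self.items.popitem(last=False)   # evict the LRU item
            self.items[key] = True
        return hit

cache = LRUCache(capacity=2)
print([cache.request(k) for k in "abab"])   # [False, False, True, True]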
@Article{Trevisan:2018:RUC,
author = "Martino Trevisan and Idilio Drago",
title = "Robust {URL} Classification With Generative
Adversarial Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "143--146",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308959",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Classifying URLs is essential for different
applications, such as parental control, URL filtering
and Ads/tracking protection. Such systems historically
identify URLs by means of regular expressions, even if
machine learning alternatives have been proposed to
overcome the time-consuming maintenance of
classification rules. Classical machine learning
algorithms, however, require large samples of URLs to
train the models, covering the diverse classes of URLs
(i.e., a ground truth), which somehow limits the
applicability of the approach. We here take a first
step towards the use of Generative Adversarial Neural
Networks (GANs) to classify URLs. GANs are attractive
for this problem for two reasons. First, GANs can
produce samples of URLs belonging to specific classes
even if exposed to a limited training set, outputting
both synthetic traces and a robust discriminator.
Second, a GAN can be trained to discriminate a class of
URLs without being exposed to all other URLs classes
--- i.e., GANs are robust even if not exposed to
uninteresting URL classes during training. Experiments
on real data show that not only the generated synthetic
traces are somehow realistic, but also the URL
classification is accurate with GANs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marin:2018:DMR,
author = "Gonzalo Mar{\'\i}n and Pedro Casas and Germ{\'a}n
Capdehourat",
title = "{DeepSec} meets {RawPower} --- Deep Learning for
Detection of Network Attacks Using Raw
Representations",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "147--150",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308960",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The application of machine learning models to the
analysis of network traffic measurements has largely
grown in recent years. In the networking domain,
shallow models are usually applied, where a set of
expert handcrafted features are needed to fix the data
before training. There are two main problems associated
with this approach: firstly, it requires expert domain
knowledge to select the input features, and secondly,
different sets of custom-made input features are
generally needed according to the specific target
(e.g., network security, anomaly detection, traffic
classification). On the other hand, the power of
machine learning models using deep architectures (i.e.,
deep learning) for networking has not been yet highly
explored. In this paper we explore the power of deep
learning models on the specific problem of detection of
network attacks, using different representations for
the input data. As a major advantage compared to the
state of the art, we consider raw measurements coming
directly from the stream of monitored bytes as the
input to the proposed models, and evaluate different
raw-traffic feature representations, including packet
and flow-level ones.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Piskozub:2018:MDM,
author = "Michal Piskozub and Riccardo Spolaor and Ivan
Martinovic",
title = "{MalAlert}: Detecting Malware in Large-Scale Network
Traffic Using Statistical Features",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "151--154",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308961",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "In recent years, we have witnessed the spread of a
significant variety of malware that operates and
propagates relying on network communications. Due to the
staggering growth of traffic in the last years,
detecting malicious software has become infeasible on a
packet-by-packet basis. In this paper, we address this
challenge by investigating malware behaviors and
designing a method to detect them relying only on
network flow-level data. In our analysis we identify
malware types with regards to their impact on a network
and the way they achieve their malicious purposes.
Leveraging this knowledge, we propose a machine
learning-based and privacy-preserving method to detect
malware. We evaluate our results on two malware
datasets (MalRec and CTU-13) containing traffic of over
65,000 malware samples, as well as one month of network
traffic from the University of Oxford containing over
23 billion flows. We show that despite the
coarse-grained information provided by network flows
and the imbalance between legitimate and malicious
traffic, MalAlert can distinguish between different
types of malware with an F1 score of 90\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wassermann:2018:MLM,
author = "Sarah Wassermann and Nikolas Wehner and Pedro Casas",
title = "Machine Learning Models for {YouTube QoE} and User
Engagement Prediction in {Smartphones}",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "155--158",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308962",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measuring and monitoring YouTube Quality of Experience
is a challenging task, especially when dealing with
cellular networks and smartphone users. Using a
large-scale database of crowdsourced YouTube-QoE
measurements in smartphones, we conceive multiple
machine-learning models to infer different
YouTube-QoE-relevant metrics and user-behavior-related
metrics from network-level measurements, without
requiring root access to the smartphone, video-player
embedding, or any other reverse-engineering-like
approaches. The dataset includes measurements from more
than 360 users worldwide, spanning over the last five
years. Our preliminary results suggest that QoE-based
monitoring of YouTube mobile can be realized through
machine learning models with high accuracy, relying
only on network-related features and without accessing
any higher-layer metric to perform the estimations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Manzo:2018:DLS,
author = "Gaetano Manzo and Juan Sebastian Otalora and Marco
Ajmone Marsan and Gianluca Rizzo",
title = "A Deep Learning Strategy for Vehicular Floating
Content Management",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "159--162",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308963",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Floating Content (FC) is a communication paradigm for
the local dissemination of contextualized information
through D2D connectivity, in a way which minimizes the
use of resources while achieving some specified
performance target. Existing approaches to FC
dimensioning are based on unrealistic system
assumptions that make them highly inaccurate and
overly conservative when applied in realistic settings.
In this paper, we present a first step towards the
development of a cognitive approach to efficient
dynamic management of FC. We propose a deep learning
strategy for FC dimensioning, which exploits a
Convolutional Neural Network (CNN) to efficiently
modulate over time the resources employed by FC in a
QoS-aware manner. Numerical evaluations show that our
approach achieves a maximum rejection rate of 3\%, and
resource savings of 37.5\% with respect to the
benchmark strategy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2018:LDM,
author = "Rui Li and Chaoyun Zhang and Paul Patras and Razvan
Stanica and Fabrice Valois",
title = "Learning Driven Mobility Control of Airborne Base
Stations in Emergency Networks",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "163--166",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308964",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mobile base stations mounted on unmanned aerial
vehicles (UAVs) provide viable wireless coverage
solutions in challenging landscapes and conditions,
where cellular/WiFi infrastructure is unavailable.
Operating multiple such airborne base stations, to
ensure reliable user connectivity, demands intelligent
control of UAV movements, as poor signal strength and
user outage can be catastrophic to mission critical
scenarios. In this paper, we propose a deep
reinforcement learning based solution to tackle the
challenges of base stations mobility control. We design
an Asynchronous Advantage Actor-Critic (A3C) algorithm
that employs a custom reward function, which
incorporates SINR and outage events information, and
seeks to provide mobile user coverage with the highest
possible signal quality. Preliminary results reveal
that our solution converges after $ 4 \times 10^5 $
steps of training, after which it outperforms a
benchmark gradient-based alternative, as we attain 5 dB
higher median SINR during an entire test mission of
10,000 steps.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Garcia:2018:RCP,
author = "Johan Garcia and Topi Korhonen",
title = "On Runtime and Classification Performance of the
Discretize--Optimize {(DISCO)} Classification
Approach",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "167--170",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308965",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Using machine learning in high-speed networks for
tasks such as flow classification typically requires
either very resource efficient classification
approaches, large amounts of computational resources,
or specialized hardware. Here we provide a sketch of
the discretize-optimize (DISCO) approach which can
construct an extremely efficient classifier for low
dimensional problems by combining feature selection,
efficient discretization, novel bin placement, and
lookup. As feature selection and discretization
parameters are crucial, appropriate combinatorial
optimization is an important aspect of the approach. A
performance evaluation is performed for a YouTube
classification task using a cellular traffic data set.
The initial evaluation results show that the DISCO
approach can move the Pareto boundary in the
classification performance versus runtime trade-off by
up to an order of magnitude compared to runtime
optimized random forest and decision tree
classifiers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hanawal:2018:DLA,
author = "Manjesh K. Hanawal and Sumit J. Darak",
title = "Distributed Learning in Ad-Hoc Networks with Unknown
Number of Players",
journal = j-SIGMETRICS,
volume = "46",
number = "3",
pages = "171--174",
month = dec,
year = "2018",
CODEN = "????",
DOI = "https://doi.org/10.1145/3308897.3308966",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Feb 2 07:14:43 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study algorithms for distributed learning in ad-hoc
cognitive networks where no central controller is
available. In such networks, the players cannot
communicate with each other and even may not know how
many other players are present in the network. If
multiple players select a common channel they collide,
which results in loss of throughput for the colliding
players. We consider both static and dynamic
scenarios, where the number of players remains fixed
throughout the game in the former case and can change
in the latter. We provide algorithms based on a novel
'trekking approach' that guarantees, with high
probability, constant regret for the static case and
sub-linear regret for the dynamic case. The trekking
approach gives improved aggregate throughput and also
results in fewer collisions compared to the
state-of-the-art algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pal:2019:ESC,
author = "Ranjan Pal and Aditya Ahuja and Sung-Han Lin and
Abhishek Kumar and Leana Golubchik and Nachikethas A.
Jagadeesan",
title = "On the Economic Sustainability of Cloud Sharing
Systems: Are Dynamic Single Resource Sharing Markets
Stable?",
journal = j-SIGMETRICS,
volume = "46",
number = "4",
pages = "2--10",
month = mar,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3372315.3372317",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:05 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The recent emergence of the small cloud (SC), both in
concept and in practice, has been driven mainly by
issues related to service cost and complexity of
commercial cloud providers (e.g., Amazon) employing
massive data centers. However, the resource
inelasticity problem [29] faced by the SCs due to their
relatively scarce resources might lead to a potential
degradation of customer QoS and loss of revenue. A
proposed solution to this problem recommends the
sharing of resources between competing SCs to alleviate
the resource inelasticity issues that might arise.
Based on this idea, a recent effort ([18]) proposed
SC-Share, a performance-driven static market model for
competitive small cloud environments that results in an
efficient market equilibrium jointly optimizing
customer QoS satisfaction and SC revenue generation.
However, an important question with a non-obvious
answer remains open, without which SC sharing markets
may not be guaranteed to be sustainable in the long
run: is it still possible to achieve a stable,
market-efficient state when the supply of SC resources
is dynamic in nature? In this paper, we take a first
step to addressing the problem of efficient market
design for single SC resource sharing in dynamic
environments. We answer our previous question in the
affirmative through the use of Arrow and Hurwicz's
disequilibrium process [9, 10] in economics, and the
gradient play technique in game theory that allows us
to iteratively converge upon efficient and stable
market equilibria.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Su:2019:CLB,
author = "Lili Su and Martin Zubeldia and Nancy Lynch",
title = "Collaboratively Learning the Best Option on Graphs,
Using Bounded Local Memory",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "1--2",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376932",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376932",
abstract = "We consider multi-armed bandit problems in social
groups wherein each individual has bounded memory and
shares the common goal of learning the best arm/option.
We say an individual learns the best option if
eventually (as $ t \to \infty $) it pulls only the arm
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hoffmann:2019:LGN,
author = "Jessica Hoffmann and Constantine Caramanis",
title = "Learning Graphs from Noisy Epidemic Cascades",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "3--4",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376933",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376933",
abstract = "Epidemic models accurately represent (among other
processes) the spread of diseases, information (rumors,
viral videos, news stories, etc.), the spread of
malevolent agents in a network (computer viruses,
malicious apps, etc.), or even biological \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2019:HTD,
author = "Xingyu Zhou and Jian Tan and Ness Shroff",
title = "Heavy-traffic Delay Optimality in Pull-based Load
Balancing Systems: Necessary and Sufficient
Conditions",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "5--6",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376935",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376935",
abstract = "In this paper, we consider a load balancing system
under a general pull-based policy. In particular, each
arrival is randomly dispatched to any server whose
queue length is below a threshold; if no such server
exists, then the arrival is randomly \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellemans:2019:PAW,
author = "Tim Hellemans and Tejas Bodas and Benny {Van Houdt}",
title = "Performance Analysis of Workload Dependent Load
Balancing Policies",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "7--8",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376936",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376936",
abstract = "Load balancing plays a crucial role in achieving low
latency in large distributed systems. Recent load
balancing strategies often rely on replication or use
placeholders to further improve latency. However,
assessing the performance and stability of \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2019:LBG,
author = "Isaac Grosof and Ziv Scully and Mor Harchol-Balter",
title = "Load Balancing Guardrails: Keeping Your Heavy Traffic
on the Road to Low Response Times",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "9--10",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376937",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376937",
abstract = "Load balancing systems, comprising a central
dispatcher and a scheduling policy at each server, are
widely used in practice, and their response time has
been extensively studied in the theoretical literature.
While much is known about the scenario where \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tang:2019:RWB,
author = "Dengwang Tang and Vijay G. Subramanian",
title = "Random Walk Based Sampling for Load Balancing in
Multi-Server Systems",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "11--12",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376938",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376938",
abstract = "In multi-server systems, a classical job assignment
algorithm works as follows: at the arrival of each job,
pick d servers independently and uniformly at random
and send the job to the least loaded server among the d
servers. This model is known as the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:2019:NRA,
author = "Lan N. Nguyen and My T. Thai",
title = "Network Resilience Assessment via {QoS} Degradation
Metrics: an Algorithmic Approach",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "13--14",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376940",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376940",
abstract = "This paper focuses on network resilience to
perturbation of edge weight. Other than connectivity,
many network applications nowadays rely upon some
measure of network distance between a pair of connected
nodes. In these systems, a metric related to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kumar:2019:TBA,
author = "Dhruv Kumar and Jian Li and Ramesh Sitaraman and
Abhishek Chandra",
title = "A {TTL}-based Approach for Data Aggregation in
Geo-distributed Streaming Analytics",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "15--16",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376941",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376941",
abstract = "Streaming data analytics has been an important topic
of research in recent years. Large quantities of data
are generated continuously over time across a variety
of application domains such as web and social
analytics, scientific computing and energy \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nikolopoulos:2019:RPS,
author = "Pavlos Nikolopoulos and Christos Pappas and Katerina
Argyraki and Adrian Perrig",
title = "Retroactive Packet Sampling for Traffic Receipts",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "17--18",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376942",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376942",
abstract = "Is it possible to design a packet-sampling algorithm
that prevents the network node that performs the
sampling from treating the sampled packets
preferentially? We study this problem in the context of
designing a {``network-transparency''} system. In this
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sermpezis:2019:ICI,
author = "Pavlos Sermpezis and Vasileios Kotronis",
title = "Inferring Catchment in {Internet} Routing",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "19--20",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376943",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376943",
abstract = "BGP is the de-facto Internet routing protocol for
interconnecting Autonomous Systems (AS). Each AS
selects its preferred routes based on its routing
policies, which are typically not disclosed. Due to the
distributed route selection and information \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akram:2019:CGP,
author = "Shoaib Akram and Jennifer B. Sartor and Kathryn S.
McKinley and Lieven Eeckhout",
title = "{Crystal Gazer}: Profile-Driven Write-Rationing
Garbage Collection for Hybrid Memories",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "21--22",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376945",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376945",
abstract = "Emerging non-volatile memory (NVM) technologies offer
greater capacity than DRAM. Unfortunately, production
NVM exhibits high latency and low write endurance.
Hybrid memory combines DRAM and NVM to deliver greater
capacity, low latency, high endurance, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Karakoy:2019:AAA,
author = "Mustafa Karakoy and Orhan Kislal and Xulong Tang and
Mahmut Taylan Kandemir and Meenakshi Arunachalam",
title = "Architecture-Aware Approximate Computing",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "23--24",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376946",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376946",
abstract = "Observing that many application programs from
different domains can live with less-than-perfect
accuracy, existing techniques try to trade off program
output accuracy with performance-energy savings. While
these works provide point solutions, they leave
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tang:2019:QDL,
author = "Xulong Tang and Ashutosh Pattnaik and Onur Kayiran and
Adwait Jog and Mahmut Taylan Kandemir and Chita Das",
title = "Quantifying Data Locality in Dynamic Parallelism in
{GPUs}",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "25--26",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376947",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/pvm.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376947",
abstract = "Dynamic parallelism (DP) is a new feature of emerging
GPUs that allows new kernels to be generated and
scheduled from the device-side (GPU) without the
host-side (CPU) intervention. To efficiently support
DP, one of the major challenges is to saturate the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tang:2019:CND,
author = "Xulong Tang and Mahmut Taylan Kandemir and Hui Zhao
and Myoungsoo Jung and Mustafa Karakoy",
title = "Computing with Near Data",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "27--28",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376948",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376948",
abstract = "The cost of moving data between compute elements and
storage elements plays a significant role in shaping
the overall performance of applications. We present a
compiler-driven approach to reducing data movement
costs. Our approach, referred to as \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balseiro:2019:DPR,
author = "Santiago R. Balseiro and David B. Brown and Chen
Chen",
title = "Dynamic Pricing of Relocating Resources in Large
Networks",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "29--30",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376950",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376950",
abstract = "We study dynamic pricing of resources that are
distributed over a network of locations (e.g., shared
vehicle systems and logistics networks). Customers with
private willingness-to-pay sequentially request to
relocate a resource from one location to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alijani:2019:STT,
author = "Reza Alijani and Siddhartha Banerjee and Sreenivas
Gollapudi and Kostas Kollias and Kamesh Munagala",
title = "The Segmentation-Thickness Tradeoff in Online
Marketplaces",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "31--32",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376951",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376951",
abstract = "A core tension in the operations of online
marketplaces is between segmentation (wherein platforms
can increase revenue by segmenting the market into ever
smaller sub-markets) and thickness (wherein the size of
the sub-market affects the utility \ldots{})",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shi:2019:VLA,
author = "Ming Shi and Xiaojun Lin and Lei Jiao",
title = "On the Value of Look-Ahead in Competitive Online
Convex Optimization",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "33--34",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376952",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376952",
abstract = "Although using look-ahead information is known to
improve the competitive ratios of online convex
optimization (OCO) problems with switching costs, the
competitive ratios obtained from existing results often
depend on the cost coefficients of the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2019:COO,
author = "Qiulin Lin and Hanling Yi and John Pang and Minghua
Chen and Adam Wierman and Michael Honig and Yuanzhang
Xiao",
title = "Competitive Online Optimization under Inventory
Constraints",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "35--36",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376953",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376953",
abstract = "This paper studies online optimization under inventory
(budget) constraints. While online optimization is a
well-studied topic, versions with inventory constraints
have proven difficult. We consider a formulation of
inventory-constrained optimization \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2019:ALB,
author = "Haoran Yu and Ermin Wei and Randall A. Berry",
title = "Analyzing Location-Based Advertising for Vehicle
Service Providers Using Effective Resistances",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "37--38",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376955",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376955",
abstract = "Vehicle service providers can display commercial ads
in their vehicles based on passengers' origins and
destinations to create a new revenue stream. We study a
vehicle service provider who can generate different ad
revenues when displaying ads on \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vial:2019:SRP,
author = "Daniel Vial and Vijay Subramanian",
title = "A Structural Result for Personalized {PageRank} and
its Algorithmic Consequences",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "39--40",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376956",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/pagerank.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376956",
abstract = "Many natural and man-made systems can be represented
as graphs, sets of objects (called nodes) and pairwise
relations between these objects (called edges). These
include the brain, which contains neurons (nodes) that
exchange signals through chemical \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cayci:2019:LCR,
author = "Semih Cayci and Atilla Eryilmaz and R. Srikant",
title = "Learning to Control Renewal Processes with Bandit
Feedback",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "41--42",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376957",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376957",
abstract = "We consider a bandit problem with K task types from
which the controller activates one task at a time. Each
task takes a random and possibly heavy-tailed
completion time, and a reward is obtained only after
the task is completed. The task types are \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Henzinger:2019:EDR,
author = "Monika Henzinger and Stefan Neumann and Stefan
Schmid",
title = "Efficient Distributed Workload (Re-){Embedding}",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "43--44",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376959",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376959",
abstract = "Modern networked systems are increasingly
reconfigurable, enabling demand-aware infrastructures
whose resources can be adjusted according to the
workload they currently serve. Such dynamic adjustments
can be exploited to improve network utilization and
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ambati:2019:OCE,
author = "Pradeep Ambati and David Irwin",
title = "Optimizing the Cost of Executing Mixed Interactive and
Batch Workloads on Transient {VMs}",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "45--46",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376960",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376960",
abstract = "Container Orchestration Platforms (COPs), such as
Kubernetes, are increasingly used to manage large-scale
clusters by automating resource allocation between
applications encapsulated in containers. Increasingly,
the resources underlying COPs are virtual \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Comden:2019:OOC,
author = "Joshua Comden and Sijie Yao and Niangjun Chen and
Haipeng Xing and Zhenhua Liu",
title = "Online Optimization in Cloud Resource Provisioning:
Predictions, Regrets, and Algorithms",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "47--48",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376961",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376961",
abstract = "Several different control methods are used in practice
or have been proposed to cost-effectively provision IT
resources. Due to the dependency of many control
methods on having accurate predictions of the future to
make good provisioning decisions, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Quan:2019:NFM,
author = "Guocong Quan and Jian Tan and Atilla Eryilmaz and Ness
Shroff",
title = "A New Flexible Multi-flow {LRU} Cache Management
Paradigm for Minimizing Misses",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "49--50",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376962",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376962",
abstract = "The Least Recently Used (LRU) caching and its variants
are used in large-scale data systems in order to
provide high-speed data access for a wide class of
applications. Nonetheless, a fundamental question still
remains: in order to minimize the miss \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zarchy:2019:ACC,
author = "Doron Zarchy and Radhika Mittal and Michael Schapira
and Scott Shenker",
title = "Axiomatizing Congestion Control",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "51--52",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376964",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376964",
abstract = "Recent years have witnessed a revival of both
industrial and academic interest in improving
congestion control designs. The quest for better
congestion control is complicated by the extreme
diversity and range of (i) the design space (as
exemplified by \ldots{})",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2019:IMC,
author = "Kuang Xu and Yuan Zhong",
title = "Information, Memory and Capacity in Dynamic Resource
Allocation",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "53--54",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376965",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376965",
abstract = "We propose a general framework, dubbed Stochastic
Processing under Imperfect Information (SPII), to study
the impact of information constraints and memories on
dynamic resource allocation. The framework involves a
Stochastic Processing Network (SPN) \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Amjad:2019:MMD,
author = "Muhammad Jehangir Amjad and Vishal Misra and Devavrat
Shah and Dennis Shen",
title = "{mRSC}: Multi-dimensional Robust Synthetic Control",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "55--56",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376966",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376966",
abstract = "When evaluating the impact of a policy (e.g., gun
control) on a metric of interest (e.g., crime-rate), it
may not be possible or feasible to conduct a randomized
control trial. In such settings where only
observational data is available, synthetic \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jose:2019:DAC,
author = "Lavanya Jose and Stephen Ibanez and Mohammad Alizadeh
and Nick McKeown",
title = "A Distributed Algorithm to Calculate Max-Min Fair
Rates Without Per-Flow State",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "57--58",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376967",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376967",
abstract = "Most congestion control algorithms, like TCP, rely on
a reactive control system that detects congestion, then
marches carefully towards a desired operating point
(e.g., by modifying the window size or adjusting a
rate). In an effort to balance stability \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{VanHoudt:2019:GAO,
author = "Benny {Van Houdt}",
title = "Global Attraction of {ODE}-based Mean Field Models
with Hyperexponential Job Sizes",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "59--60",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376969",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376969",
abstract = "Mean field modeling is a popular approach to assess
the performance of large scale computer systems. The
evolution of many mean field models is characterized by
a set of ordinary differential equations that have a
unique fixed point. In order to prove \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vanderBoor:2019:HSJ,
author = "Mark van der Boor and Sem Borst and Johan van
Leeuwaarden",
title = "Hyper-Scalable {JSQ} with Sparse Feedback",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "61--62",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376970",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376970",
abstract = "Load balancing algorithms play a vital role in
enhancing performance in data centers and cloud
networks. Due to the massive size of these systems,
scalability challenges, and especially the
communication overhead associated with load balancing
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2019:TEK,
author = "Florin Ciucu and Felix Poloczek",
title = "Two Extensions of {Kingman}'s {GI/G/1} Bound",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "63--64",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376971",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376971",
abstract = "A simple bound in GI/G/1 queues was obtained by
Kingman using a discrete martingale transform [5]. We
extend this technique to (1) multiclass (GI/G/1) queues
and (2) Markov Additive Processes (MAPs) whose
background processes can be time-inhomogeneous or
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2019:QLD,
author = "Florin Ciucu and Felix Poloczek and Amr Rizk",
title = "Queue and Loss Distributions in Finite-Buffer Queues",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "65--66",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376972",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376972",
abstract = "We derive simple bounds on the queue distribution in
finite-buffer queues with Markovian arrivals. The
bounds capture a truncated exponential behavior,
involving joint horizontal and vertical shifts of an
exponential function; this is fundamentally \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2019:IQS,
author = "Jiaming Xu and Yuan Zhong",
title = "Improved Queue-Size Scaling for Input-Queued Switches
via Graph Factorization",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "67--68",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376973",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376973",
abstract = "This paper studies the scaling of the expected total
queue size in an $ n \times n $ input-queued switch, as
a function of both the load $ \rho $ and the system scale n.
We provide a new class of scheduling policies under
which the expected total queue size scales as
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Han:2019:TWD,
author = "Youil Han and Bryan S. Kim and Jeseong Yeon and
Sungjin Lee and Eunji Lee",
title = "{TeksDB}: Weaving Data Structures for a
High-Performance Key--Value Store",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "69--70",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376975",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376975",
abstract = "Key-value stores (KVS) are now an integral part of
modern data-intensive systems, thanks to their
simplicity, scalability, and efficiency over
traditional database systems. Databases such as MySQL
employ KVS (in this case, RocksDB as their backend
storage \ldots{})",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Radulovic:2019:PMS,
author = "Milan Radulovic and Rommel S{\'a}nchez Verdejo and
Paul Carpenter and Petar Radojkovi{\'c} and Bruce Jacob
and Eduard Ayguad{\'e}",
title = "{PROFET}: Modeling System Performance and Energy
Without Simulating the {CPU}",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "71--72",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376976",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376976",
abstract = "Application performance on novel memory systems is
typically estimated using a hardware simulator. The
simulation is, however, time consuming, which limits
the number of design options that can be explored
within a practical length of time. Also, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wei:2019:HBS,
author = "Song Wei and Kun Zhang and Bibo Tu",
title = "{HyperBench}: a Benchmark Suite for Virtualization
Capabilities",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "73--74",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376977",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376977",
abstract = "Virtualization is ubiquitous in modern data centers.
By deploying applications on separate virtual machines
hosted in a shared physical machine, it brings benefits
over traditional systems in resource utilization [5,
10], system security [2, 3], and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2019:AMD,
author = "Lei Zhang and Zhemin Yang and Yuyu He and Mingqi Li
and Sen Yang and Min Yang and Yuan Zhang and Zhiyun
Qian",
title = "App in the Middle: Demystify Application
Virtualization in {Android} and its Security Threats",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "75--76",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376978",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376978",
abstract = "Customizability is a key feature of the Android
operating system that differentiates it from Apple's
iOS. One concrete feature that is gaining popularity is
called ``app virtualization''. This feature allows
multiple copies of the same app to be installed
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ngoc:2019:EYS,
author = "Tu Dinh Ngoc and Bao Bui and Stella Bitchebe and Alain
Tchana and Valerio Schiavoni and Pascal Felber and
Daniel Hagimont",
title = "Everything You Should Know About {Intel SGX}
Performance on Virtualized Systems",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "77--78",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376979",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376979",
abstract = "Intel SGX has attracted much attention from academia
and is already powering commercial applications. Cloud
providers have also started implementing SGX in their
cloud offerings. Research efforts on Intel SGX so far
have mainly focused on its security \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wei:2019:QMO,
author = "Honghao Wei and Xiaohan Kang and Weina Wang and Lei
Ying",
title = "{QuickStop}: a {Markov} Optimal Stopping Approach for
Quickest Misinformation Detection",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "79--80",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376981",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376981",
abstract = "This paper combines data-driven and model-driven
methods for real-time misinformation detection. Our
algorithm, named QuickStop, is an optimal stopping
algorithm based on a probabilistic information
spreading model obtained from labeled data. The
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vera:2019:BPL,
author = "Alberto Vera and Siddhartha Banerjee",
title = "The {Bayesian} Prophet: a Low-Regret Framework for
Online Decision Making",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "81--82",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376982",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376982",
abstract = "Motivated by the success of using black-box predictive
algorithms as subroutines for online decision-making,
we develop a new framework for designing online
policies given access to an oracle providing
statistical information about an offline benchmark.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Su:2019:SDG,
author = "Lili Su and Jiaming Xu",
title = "Securing Distributed Gradient Descent in High
Dimensional Statistical Learning",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "83--84",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376983",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376983",
abstract = "We consider unreliable distributed learning systems
wherein the training data is kept confidential by
external workers, and the learner has to interact
closely with those workers to train a model. In
particular, we assume that there exists a system
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agarwal:2019:MAT,
author = "Anish Agarwal and Muhammad Jehangir Amjad and Devavrat
Shah and Dennis Shen",
title = "Model Agnostic Time Series Analysis via Matrix
Estimation",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "85--86",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376984",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376984",
abstract = "We propose an algorithm to impute and forecast a time
series by transforming the observed time series into a
matrix, utilizing matrix estimation to recover missing
values and de-noise observed entries, and performing
linear regression to make \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pourghassemi:2019:WIA,
author = "Behnam Pourghassemi and Ardalan Amiri Sani and Aparna
Chandramowlishwaran",
title = "What-If Analysis of Page Load Time in {Web} Browsers
Using Causal Profiling",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "87--88",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376986",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376986",
abstract = "Web browsers have become one of the most commonly used
applications for desktop and mobile users. Despite
recent advances in network speeds and several
techniques to speed up web page loading, browsers still
suffer from relatively long page load time \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2019:PCL,
author = "Ran Liu and Edmund Yeh and Atilla Eryilmaz",
title = "Proactive Caching for Low Access-Delay Services under
Uncertain Predictions",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "89--90",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376987",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376987",
abstract = "Network traffic for delay-sensitive services has
become a dominant part in the network. Proactive
caching with the aid of predictive information has been
proposed as a promising method to enhance delay
performance. In this paper, we analytically \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2019:UNP,
author = "Xiao Zhu and Yihua Ethan Guo and Ashkan Nikravesh and
Feng Qian and Z. Morley Mao",
title = "Understanding the Networking Performance of {Wear
OS}",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "91--92",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376988",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376988",
abstract = "Networking on wearable devices such as smart watches
is becoming increasingly important as fueled by new
hardware, OS support, and applications. In this work,
we conduct a first in-depth investigation of the
networking performance of Wear OS, one of the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghose:2019:DCD,
author = "Saugata Ghose and Tianshi Li and Nastaran Hajinazar
and Damla Senol Cali and Onur Mutlu",
title = "Demystifying Complex Workload-{DRAM} Interactions: an
Experimental Study",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "93--93",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376989",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376989",
abstract = "It has become increasingly difficult to understand the
complex interaction between modern applications and
main memory, composed of Dynamic Random Access Memory
(DRAM) chips. Manufacturers and researchers are
developing many different types of DRAM, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2019:NMM,
author = "Chul-Ho Lee and Min Kang and Do Young Eun",
title = "Non-{Markovian} {Monte Carlo} on Directed Graphs",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "94--95",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376991",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376991",
abstract = "Markov Chain Monte Carlo (MCMC) has been the de facto
technique for sampling and inference of large graphs
such as online social networks. At the heart of MCMC
lies the ability to construct an ergodic Markov chain
that attains any given stationary \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
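As background to the Lee, Kang and Eun entry above, which contrasts the paper's non-Markovian construction with classical MCMC sampling of graphs, the Python sketch below shows the standard Metropolis-Hastings random walk whose stationary distribution is uniform over the nodes of an undirected graph. It illustrates the MCMC baseline only; the example graph is an arbitrary assumption.

# Classical Metropolis--Hastings walk targeting the uniform distribution
# over the nodes of an undirected graph (MCMC baseline, not the paper's method).
import random

def mh_uniform_walk(adj, start, steps, seed=0):
    """adj maps node -> list of neighbours (undirected graph)."""
    rng = random.Random(seed)
    x, visits = start, {}
    for _ in range(steps):
        y = rng.choice(adj[x])                            # propose a uniformly random neighbour
        accept = min(1.0, len(adj[x]) / len(adj[y]))      # MH acceptance ratio for a uniform target
        if rng.random() < accept:
            x = y
        visits[x] = visits.get(x, 0) + 1
    return visits

if __name__ == "__main__":
    graph = {0: [1, 2], 1: [0, 2, 3], 2: [0, 1], 3: [1]}
    print(mh_uniform_walk(graph, start=0, steps=100000))  # visit counts are roughly equal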
@Article{Dai:2019:ACL,
author = "Osman Emre Dai and Daniel Cullina and Negar Kiyavash
and Matthias Grossglauser",
title = "Analysis of a Canonical Labeling Algorithm for the
Alignment of Correlated {Erd{\H{o}}s--R{\'e}nyi}
Graphs",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "96--97",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376992",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376992",
abstract = "Graph alignment in two correlated random graphs refers
to the task of identifying the correspondence between
vertex sets of the graphs. Recent results have
characterized the exact information-theoretic threshold
for graph alignment in correlated
Erd{\H{o}}s--\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Combes:2019:CEE,
author = "Richard Combes and Mikael Touati",
title = "Computationally Efficient Estimation of the Spectral
Gap of a {Markov} Chain",
journal = j-SIGMETRICS,
volume = "47",
number = "1",
pages = "98--100",
month = dec,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3376930.3376993",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:15:26 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3376930.3376993",
abstract = "We consider the problem of estimating from sample
paths the absolute spectral gap $1 - \lambda$ of a
reversible, irreducible and aperiodic Markov chain
$(X_t)_{t \in \mathbb{N}}$ over a finite state space $\Omega$. We propose
the UCPI (Upper Confidence Power Iteration) algorithm
for \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
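For the Combes and Touati entry above, the quantity being estimated can be made concrete with a naive plug-in estimator: estimate the transition matrix from one sample path and take one minus the second-largest eigenvalue modulus. The Python sketch below implements only that naive baseline on an illustrative two-state chain; it is not the UCPI algorithm proposed in the paper.

# Naive plug-in estimate of the absolute spectral gap (baseline, not UCPI).
import numpy as np

def plug_in_spectral_gap(path, num_states):
    counts = np.zeros((num_states, num_states))
    for x, y in zip(path[:-1], path[1:]):
        counts[x, y] += 1
    P = counts / np.maximum(counts.sum(axis=1, keepdims=True), 1)   # empirical transition matrix
    eig = np.sort(np.abs(np.linalg.eigvals(P)))[::-1]
    return 1.0 - eig[1]                                             # 1 minus second-largest modulus

if __name__ == "__main__":
    # two-state chain with flip probability 0.25, whose true gap is 0.5
    rng = np.random.default_rng(1)
    x, path = 0, []
    for _ in range(200000):
        path.append(x)
        if rng.random() < 0.25:
            x = 1 - x
    print("estimated gap:", plug_in_spectral_gap(path, 2))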
@Article{Squillante:2019:SIW,
author = "Mark S. Squillante",
title = "Special Issue on {The Workshop on MAthematical
performance Modeling and Analysis (MAMA 2019)}",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "2--2",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374890",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important role
in research studies to investigate fundamental issues
and tradeoffs at the core of performance problems in
the design and implementation of complex computer
systems, networks and applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abuthahir:2019:DWN,
author = "Abuthahir and Gaurav Raina and Thomas Voice",
title = "Do we need two forms of feedback in the {Rate Control
Protocol (RCP)}?",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "3--5",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374891",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There is considerable interest in the networking
community in explicit congestion control as it may
allow the design of a fair, stable, low loss, low
delay, and high utilization network. The Rate Control
Protocol (RCP) is an example of such a congestion
control protocol. The current design of RCP suggests
that it should employ two forms of feedback, i.e., rate
mismatch and queue size, in order to manage its flow
control algorithms. An outstanding design question in
RCP is whether the presence of queue size feedback is
useful, given the presence of feedback based on rate
mismatch. To address this question, we conduct analysis
(stability and Hopf bifurcation) and packet-level
simulations. The analytical results reveal that the
presence of queue size feedback in the protocol
specification may induce a sub-critical Hopf
bifurcation, which can lead to undesirable system
behavior. The analysis is corroborated by numerical
computations and some packet-level simulations. Based
on our work, the suggestion for RCP is to only include
feedback based on rate mismatch in the design of the
protocol.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Yes, first author has only one name.",
}
@Article{Goel:2019:OAS,
author = "Gautam Goel and Adam Wierman",
title = "An Online Algorithm for Smoothed Online Convex
Optimization",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "6--8",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374892",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider Online Convex Optimization (OCO) in the
setting where the costs are m-strongly convex and the
online learner pays a switching cost for changing
decisions between rounds. We show that the recently
proposed Online Balanced Descent (OBD) algorithm is
constant competitive in this setting, with competitive
ratio 3+O(1/m), irrespective of the ambient dimension.
We demonstrate the generality of our approach by
showing that the OBD framework can be used to construct
a competitive algorithm for LQR control.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2019:OPP,
author = "Xiaoqi Tan and Alberto Leon-Garcia and Danny H. K.
Tsang",
title = "Optimal Posted Prices for Online Resource Allocation
with Supply Costs",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "9--11",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374893",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a general online resource allocation problem,
where a provider sells multiple types of
capacity-limited resources to heterogeneous customers
that arrive in a sequential and arbitrary manner. The
provider charges payment from customers who purchase a
bundle of resources but must pay an increasing supply
cost with respect to the total resource allocated. The
goal is to maximize the social welfare, namely, the
total valuation of customers for their purchased
bundles, minus the total supply cost of the provider
for all the resources that have been allocated. We
adopt the competitive analysis framework and provide an
optimal posted-pricing mechanism (PPM). Our PPM is
optimal in the sense that no other online algorithms
can achieve a better competitive ratio.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gardner:2019:SDH,
author = "Kristen Gardner and Cole Stephens",
title = "Smart Dispatching in Heterogeneous Systems",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "12--14",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In multi-server systems, selecting to which server to
dispatch an arriving job is a key factor influencing
system response time. One of the most widely studied
policies is Join-the-Shortest-Queue (JSQ), which is
known to minimize mean response time in certain
settings [7]. Many variants on JSQ have been proposed,
including JSQ-d, under which a job is dispatched to the
shortest queue among d servers selected uniformly at
random [3, 5]; Join-Idle-Queue (JIQ), under which the
dispatcher knows which servers are idle but not the
queue lengths of non-idle servers [2]; and others. The
vast majority of work analyzing JSQ and related
policies makes a key assumption: that the system is
homogeneous, meaning that all servers have the same
speed. This assumption is inaccurate in most modern
computer systems. Server heterogeneity can arise, e.g.,
when a server farm consists of several generations of
hardware, or when many virtual machines contend for
resources on the same physical machine. Unfortunately,
the wealth of results about how best to dispatch in
homogeneous systems does not translate well to
heterogeneous systems. Policies like JSQ-d and JIQ,
which can achieve near-optimal performance in
homogeneous systems, can lead to unacceptably high
response times and even instability in heterogeneous
systems [4, 8].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
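The Gardner and Stephens entry above refers to the JSQ-d and JIQ dispatching rules. A minimal Python sketch of the two rules follows; the queue lengths and function names are illustrative assumptions, and the point is simply that neither rule consults server speeds, which is why heterogeneity can hurt them.

# Illustrative dispatchers for the two policies named in the abstract.
import random

def jsq_d(queues, d, rng):
    """Join-the-Shortest-Queue among d servers sampled uniformly at random."""
    sampled = rng.sample(range(len(queues)), d)
    return min(sampled, key=lambda i: queues[i])

def jiq(queues, rng):
    """Join-Idle-Queue: pick an idle server if one exists, otherwise a random server."""
    idle = [i for i, q in enumerate(queues) if q == 0]
    return rng.choice(idle) if idle else rng.randrange(len(queues))

if __name__ == "__main__":
    rng = random.Random(0)
    queues = [3, 0, 5, 1]                  # current queue lengths at four servers
    print("JSQ-2 sends the job to server", jsq_d(queues, 2, rng))
    print("JIQ sends the job to server", jiq(queues, rng))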
@Article{Anton:2019:RPS,
author = "Elene Anton and Urtzi Ayesta and Matthieu Jonckheere
and Ina Maria Verloop",
title = "Redundancy with Processor Sharing servers",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "15--17",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374895",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The main motivation to investigate redundancy models
comes from empirical evidence suggesting that
redundancy can help improve the performance of
real-world applications. Under redundancy, a job that
arrives to the system is dispatched to d servers
uniformly chosen at random in order to benefit from the
variability of the length of these queues. As soon as
one of the copies finishes service, the job (and its
copies) is removed from the system, and as a
consequence, a job's delay is given by the minimum
delay among the servers its copies are sent to. Most of
the literature on performance evaluation of redundancy
systems has been carried out when First Come First
Served (FCFS) is implemented in the servers. In
particular, for exponential service time distributions,
Gardner et al. [4, 5] and Bonald and Comte [2] show
that the stability region is not reduced due to adding
redundant copies. In this extended abstract, we focus
instead on Processor Sharing (PS) service policy and
study how redundancy impacts the stability condition.
In particular, we aim to study the impact that the
correlation structure of the copies has on the
performance of the redundancy-d model. In a recent
paper, Gardner et al. [3] showed that the assumption of
independent and identically distributed (i.i.d.)
copies, can be unrealistic, and that it might lead to
theoretical results that do not reflect the results of
replication schemes in real-life computer systems. We
consider the two extreme cases of correlation: (i) the
copies are i.i.d.; (ii) the copies of a job are exact
replicas (identical copies). We observe that the
stability condition strongly depends on the correlation
structure, as well as on the number of redundant
copies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berg:2019:HOS,
author = "Benjamin Berg and Rein Vesilo and Mor Harchol-Balter",
title = "{heSRPT}: Optimal Scheduling of Parallel Jobs with
Known Sizes",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "18--20",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374896",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Nearly all modern data centers serve workloads which
are capable of exploiting parallelism. When a job
parallelizes across multiple servers it will complete
more quickly, but jobs receive diminishing returns from
being allocated additional servers. Because allocating
multiple servers to a single job is inefficient, it is
unclear how best to share a fixed number of servers
between many parallelizable jobs. In this paper, we
provide the first closed form expression for the
optimal allocation of servers to jobs. Specifically, we
specify the number of servers that should be allocated
to each job at every moment in time. Our solution is a
combination of favoring small jobs (as in SRPT
scheduling) while still ensuring high system
efficiency. We call our scheduling policy
high-efficiency SRPT (heSRPT).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Su:2019:CAS,
author = "Yu Su and Xiaoqi Ren and Shai Vardi and Adam Wierman
and Yuxiong He",
title = "Communication-Aware Scheduling of
Precedence-Constrained Tasks",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "21--23",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374897",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Jobs in large-scale machine learning platforms are
expressed using a computational graph of tasks with
precedence constraints. To handle such
precedence-constrained tasks that have
machine-dependent communication demands in settings
with heterogeneous service rates and communication
times, we propose a new scheduling framework,
Generalized Earliest Time First (GETF), that improves
upon state-of-the-art results in the area.
Specifically, we provide the first provable, worst-case
approximation guarantee for the goal of minimizing the
makespan of tasks with precedence constraints on
related machines with machine-dependent communication
times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scully:2019:SNO,
author = "Ziv Scully and Mor Harchol-Balter and Alan
Scheller-Wolf",
title = "Simple Near-Optimal Scheduling for the {M/G/1}",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "24--26",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of preemptively scheduling
jobs to minimize mean response time of an M/G/1 queue.
When the scheduler knows each job's size, the shortest
remaining processing time (SRPT) policy is optimal.
Unfortunately, in many settings we do not have access
to each job's size. Instead, we know only the job size
distribution. In this setting, the Gittins policy is
known to minimize mean response time, but its complex
priority structure can be computationally intractable.
A much simpler alternative to Gittins is the shortest
expected remaining processing time (SERPT) policy.
While SERPT is a natural extension of SRPT to unknown
job sizes, it is unknown how close SERPT is to
optimal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
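The Scully, Harchol-Balter and Scheller-Wolf entry above describes SERPT as ranking jobs by their expected remaining processing time given only the job-size distribution. The Python sketch below computes that index for a discrete size distribution; the distribution and the job ages are illustrative assumptions.

# SERPT index for a discrete job-size distribution: E[S - age | S > age].
def expected_remaining(sizes, probs, age):
    num = sum(p * (s - age) for s, p in zip(sizes, probs) if s > age)
    den = sum(p for s, p in zip(sizes, probs) if s > age)
    return num / den if den > 0 else 0.0

if __name__ == "__main__":
    sizes, probs = [1.0, 10.0], [0.9, 0.1]   # mostly short jobs, occasionally a long one
    ages = {"A": 0.0, "B": 2.0}              # current ages of two jobs in the queue
    index = {j: expected_remaining(sizes, probs, a) for j, a in ages.items()}
    print(index)                             # A: 1.9, B: 8.0
    print("SERPT serves job", min(index, key=index.get), "first")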
@Article{Vardoyan:2019:SAQ,
author = "Gayane Vardoyan and Saikat Guha and Philippe Nain and
Don Towsley",
title = "On the Stochastic Analysis of a Quantum Entanglement
Switch",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "27--29",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374899",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a quantum entanglement switch that serves k
users in a star topology. We model variants of the
system using continuous-time Markov chains (CTMCs) and
obtain expressions for switch capacity and the expected
number of qubits stored in memory at the switch. Using
CTMCs allows us to obtain a number of analytic results
for systems in which the links are homogeneous or
heterogeneous and for switches that have infinite or
finite buffer sizes. In addition, we can easily model
the effects of decoherence of quantum states using this
technique. From numerical observations, we discover
that decoherence has little effect on capacity and
expected number of stored qubits for homogeneous
systems. For heterogeneous systems, especially those
operating close to stability constraints, buffer size
and decoherence can significantly affect performance.
We also learn that, in general, increasing the buffer
size from one to two qubits per link is advantageous to
most systems, while increasing the buffer size further
yields diminishing returns.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
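The Vardoyan et al. entry above models the entanglement switch with continuous-time Markov chains. As a reminder of the underlying machinery only (the generator below is an arbitrary three-state example, not the paper's chain), the Python sketch solves pi Q = 0 with pi summing to one.

# Stationary distribution of a small CTMC from its generator matrix Q.
import numpy as np

def ctmc_stationary(Q):
    n = Q.shape[0]
    A = np.vstack([Q.T, np.ones(n)])               # pi Q = 0 together with sum(pi) = 1
    b = np.concatenate([np.zeros(n), [1.0]])
    pi, *_ = np.linalg.lstsq(A, b, rcond=None)
    return pi

if __name__ == "__main__":
    Q = np.array([[-2.0, 2.0, 0.0],
                  [1.0, -3.0, 2.0],
                  [0.0, 1.0, -1.0]])
    print("stationary distribution:", ctmc_stationary(Q))   # approx [1/7, 2/7, 4/7]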
@Article{Casale:2019:NSC,
author = "Giuliano Casale and Peter G. Harrison and Ong Wai
Hong",
title = "Novel Solutions for Closed Queueing Networks with
Load-Dependent Stations",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "30--32",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Load-dependent closed queueing networks are difficult
to approximate since their analysis requires considering
state-dependent service demands. Standard
evaluation techniques, such as mean-value analysis, are
not equally efficient in the load-dependent setting,
where mean queue-lengths are insufficient alone to
recursively determine the model equilibrium
performance. As such, novel exact techniques to address
this class of models can benefit performance
engineering practice by offering alternative trade-offs
between accuracy and computational cost. In this paper,
we derive novel exact solutions for the normalizing
constant of state probabilities in the load-dependent
setting. For single-class load-dependent models, we
provide an explicit exact formula for the normalizing
constant that is valid for models with arbitrary
load-dependent rates. From this result, we derive two
novel integral forms for the normalizing constant in
multiclass load-dependent models, which involve
integration in the real and complex domains, leading to
novel numerical approximations. The paper also
illustrates through experiments the computational gains
and accuracy of the obtained expressions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
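The Casale, Harrison and Hong entry above concerns normalizing constants of closed queueing networks with load-dependent stations. For orientation only, the Python sketch below runs the classical single-class convolution recursion for that constant under illustrative demands and rate multipliers; it is textbook background, not the new exact formulas derived in the paper.

# Classical convolution recursion for the normalizing constant G(N) of a
# single-class closed network; station m serves at rate mu[m][k-1] when it
# holds k jobs and has per-visit demand D[m].
def normalizing_constant(D, mu, N):
    def f(m, k):                                   # un-normalized term for k jobs at station m
        prod = 1.0
        for j in range(1, k + 1):
            prod *= mu[m][j - 1]
        return (D[m] ** k) / prod if k > 0 else 1.0

    G = [1.0] + [0.0] * N                          # empty network: G(0) = 1
    for m in range(len(D)):                        # fold in one station at a time
        G = [sum(f(m, k) * G[n - k] for k in range(n + 1)) for n in range(N + 1)]
    return G[N]

if __name__ == "__main__":
    D = [1.0, 2.0]                                 # service demands at two stations
    mu = [[1.0, 1.0, 1.0], [1.0, 2.0, 2.0]]        # station 2 speeds up once it holds 2+ jobs
    print("G(3) =", normalizing_constant(D, mu, 3))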
@Article{Somashekar:2019:TLT,
author = "Gagan Somashekar and Mohammad Delasay and Anshul
Gandhi",
title = "Tighter {Lyapunov} Truncation for Multi-Dimensional
Continuous Time {Markov} Chains with Known Moments",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "33--35",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374901",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Continuous Time Markov chains (CTMCs) are widely used
to model and analyze networked systems. A common
analysis approach is to solve the system of balance
equations governing the state transitions of a CTMC to
obtain its steady-state probability distribution, and
use the state probabilities to derive or compute
various performance measures. In many systems, the
state space of the underlying CTMC is infinite and
multi-dimensional with state-dependent transitions;
exact analysis of such models is challenging. For
example, the exact probability distribution of the
number of jobs in the Discriminatory Processor Sharing
(DPS) system, first proposed by Kleinrock in 1967 [4],
is still an open challenge. Likewise, obtaining the
exact state probabilities of quasi-birth-and-death
(QBD) processes with level-dependent transitions is
known to be challenging [1]; QBDs are infinite state
space multi-dimensional Markov chains in which states
are organized into levels and transitions are skip-free
between the levels.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lange:2019:HTA,
author = "Daniela Hurtado Lange and Siva Theja Maguluri",
title = "Heavy-traffic Analysis of the Generalized Switch under
Multidimensional State Space Collapse",
journal = j-SIGMETRICS,
volume = "47",
number = "2",
pages = "36--38",
month = sep,
year = "2019",
CODEN = "????",
DOI = "https://doi.org/10.1145/3374888.3374902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Dec 11 07:39:06 MST 2019",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The drift method was recently developed to study
performance of queueing systems in heavy-traffic [1].
It has been used to analyze several queueing systems,
including some where the Complete Resource Pooling
(CRP) condition is not satisfied, like the input-queued
switch [4]. In this paper we study the generalized
switch operating under MaxWeight using the drift
method. The generalized switch is a queueing system
that was first introduced by [5], and can be thought of
as an extension of several single-hop queueing systems,
such as the input-queued switch and ad hoc wireless
networks. When the CRP condition is not satisfied, we
prove that there is a multidimensional state space
collapse to a cone and we compute bounds on a linear
combination of the queue lengths that are tight in
heavy-traffic. This work generalizes some of the
results obtained by [1] and the results from [4], since
the queueing systems studied there are particular cases
of the generalized switch.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Azizan:2020:OAL,
author = "Navid Azizan",
title = "Optimization Algorithms for Large-Scale Systems: From
Deep Learning to Energy Markets",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "2--5",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380910",
abstract = "Brief Biography: Navid Azizan is a fifth-year PhD
candidate in Computing and Mathematical Sciences (CMS)
at the California Institute of Technology (Caltech),
where he is co-advised by Adam Wierman and Babak
Hassibi, and is a member of multiple research
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Comden:2020:AOD,
author = "Joshua Comden",
title = "Algorithms for Online and Distributed Optimization and
their Applications",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "6--9",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380911",
abstract = "Brief Biography: Joshua Comden is currently a Ph.D.
Candidate at Stony Brook University studying Operations
Research in the Department of Applied Mathematics and
Statistics and is expected to graduate in December
2019. He received his M.S. in Operations \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dipietro:2020:PMO,
author = "Salvatore Dipietro",
title = "Performance Modelling and Optimisation of {NoSQL}
Database Systems",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "10--13",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380912",
abstract = "Salvatore Dipietro is a final-year PhD candidate in
Computing at Imperial College London. His current
research focus is on performance modelling and
optimization of NoSQL database systems. His work is
supported by HiPEDS centre for doctoral training,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Javadi:2020:AAD,
author = "Seyyed Ahmad Javadi",
title = "Analytical Approaches for Dynamic Scheduling in Cloud
Environments",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "14--16",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380913",
abstract = "Brief Biography: Seyyed Ahmad Javadi is a researcher
in the Computer Laboratory (CompAcctSys group) at the
University of Cambridge. His primary research interests
include cloud computing and the Internet of Things
(IoT). His current research involves \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2020:VSC,
author = "Qian Li",
title = "Vision-based Sensor Coverage in Uncertain Geometric
Domains",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "17--19",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380914",
abstract = "Brief Biography: Qian Li was born in Shandong, China,
in 1989. She received the bachelor's degree in
mathematics and applied mathematics and a minor bachelor's
degree in Finance from Tianjin University and
Nankai University (Tianjin, China) in 2012, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pourghassemi:2020:SDA,
author = "Behnam Pourghassemi",
title = "Scalable Dynamic Analysis of Browsers for Privacy and
Performance",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "20--23",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380915",
abstract = "Brief Biography: Behnam Pourghassemi is a fifth-year
Ph.D. student in Computer Engineering at the University
of California, Irvine. His research primarily revolves
around performance analysis and privacy on the web,
including workload characterization of \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Su:2020:DDS,
author = "Lili Su",
title = "Defending Distributed Systems Against Adversarial
Attacks: Consensus, Consensus-based Learning, and
Statistical Learning",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "24--27",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380916",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380916",
abstract = "Brief Biography: I am a postdoc in the Computer
Science and Artificial Intelligence Laboratory (CSAIL)
at MIT, hosted by Professor Nancy Lynch. I received a
Ph.D. in Electrical and Computer Engineering from the
University of Illinois at Urbana-. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wajahat:2020:CDM,
author = "Muhammad Wajahat",
title = "Cost-Efficient Dynamic Management of Cloud Resources
through Supervised Learning",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "28--30",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380917",
abstract = "Brief Biography: Muhammad Wajahat is a PhD candidate
in the department of Computer Science at Stony Brook
University. He works in the Performance Analysis of
Computer Systems (PACE) Lab, under the supervision of
Dr. Anshul Gandhi. Before joining \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{YU:2020:NCS,
author = "Pei-Duo YU",
title = "Network Centralities as Statistical Inference for
Large Networks: Combinatorics, Probability and
Efficient Graph Algorithms",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "31--33",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380918",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380918",
abstract = "Brief Biography: Pei Duo YU received his Ph.D. degree
in Computer Science at City University of Hong Kong
(CityU) under the supervision of Dr. Chee Wei TAN.
Before joining CityU in 2016, Pei Duo YU received an M.Sc.
in Applied Mathematics at National Chiao \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2020:AOLa,
author = "Xingyu Zhou",
title = "Asymptotically Optimal Load Balancing: Theory and
Algorithms",
journal = j-SIGMETRICS,
volume = "47",
number = "3",
pages = "34--37",
month = jan,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3380908.3380919",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 27 06:46:01 MST 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3380908.3380919",
abstract = "Brief Biography: Xingyu Zhou is a Ph.D. student at the
ECE department of Ohio State University, advised by
Prof. Ness Shroff. He is currently a Presidential
Fellow, the most prestigious award at OSU. His primary
research focus is on load balancing in \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Palani:2020:OMS,
author = "Kartik Palani and David M. Nicol",
title = "Optimal Monitoring Strategies for Critical
Infrastructure Networks",
journal = j-SIGMETRICS,
volume = "47",
number = "4",
pages = "4--7",
month = apr,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3397776.3397778",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue May 5 13:55:33 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3397776.3397778",
abstract = "Given a choice among multiple security monitoring
solutions and multiple locations to deploy them, what
strategy best protects the network? What metric is used
to compare different securing strategies? What
constraints make it harder/easier to secure \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Padhee:2020:IUP,
author = "Malhar Padhee and Reetam Sen Biswas and Anamitra Pal
and Kaustav Basu and Arunabha Sen",
title = "Identifying Unique Power System Signatures for
Determining Vulnerability of Critical Power System
Assets",
journal = j-SIGMETRICS,
volume = "47",
number = "4",
pages = "8--11",
month = apr,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3397776.3397779",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue May 5 13:55:33 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3397776.3397779",
abstract = "In this paper, the finer granularity of phasor
measurement unit (PMU) data is exploited to develop a
data-driven approach for accurate health assessment of
large power transformers (LPTs). The research
demonstrates how variations in signal-to-\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Oostenbrink:2020:MWP,
author = "Jorik Oostenbrink and Fernando A. Kuipers",
title = "A Moment of Weakness: Protecting Against Targeted
Attacks Following a Natural Disaster",
journal = j-SIGMETRICS,
volume = "47",
number = "4",
pages = "12--15",
month = apr,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3397776.3397780",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue May 5 13:55:33 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3397776.3397780",
abstract = "By targeting communication and power networks,
malicious actors can significantly disrupt our society.
As networks are more vulnerable after a natural
disaster, this moment of weakness may be exploited to
disrupt the network even further. However, the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pournaras:2020:CFI,
author = "Evangelos Pournaras and Riccardo Taormina and Manish
Thapa and Stefano Galelli and Venkata Palleti and
Robert Kooij",
title = "Cascading Failures in Interconnected Power-to-Water
Networks",
journal = j-SIGMETRICS,
volume = "47",
number = "4",
pages = "16--20",
month = apr,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3397776.3397781",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue May 5 13:55:33 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3397776.3397781",
abstract = "The manageability and resilience of critical
infrastructures, such as power and water networks, is
challenged by their increasing interdependence and
interconnectivity. Power networks often experience
cascading failures, i.e. blackouts, that have
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2020:SPD,
author = "Yushi Tan and Arindam K. Das and Mareldi Ahumada-Paras
and Payman Arabshahi and Daniel S. Kirschen",
title = "Scheduling Post-disaster Repairs in Electricity
Distribution Networks with Uncertain Repair Times",
journal = j-SIGMETRICS,
volume = "47",
number = "4",
pages = "21--24",
month = apr,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3397776.3397782",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue May 5 13:55:33 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3397776.3397782",
abstract = "Natural disasters, such as hurricanes, large wind and
ice storms, typically require the repair of a large
number of components in electricity distribution
networks. Since power cannot be restored before the
completion of repairs, optimally scheduling \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kant:2020:APT,
author = "Krishna Kant",
title = "Advanced Persistent Threats in Autonomous Driving",
journal = j-SIGMETRICS,
volume = "47",
number = "4",
pages = "25--28",
month = apr,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3397776.3397783",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue May 5 13:55:33 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3397776.3397783",
abstract = "In this paper, we consider the problem of detecting
performance related advanced persistent threats on a
system of connected automated vehicles (CAVs) possibly
mixed with connected, but manually driven vehicles
(CMVs), operating over an urban area or \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Banerjee:2020:ULA,
author = "Siddhartha Banerjee and Daniel Freund",
title = "Uniform Loss Algorithms for Online Stochastic
Decision-Making With Applications to Bin Packing",
journal = j-SIGMETRICS,
volume = "48",
number = "1",
pages = "1--2",
month = jul,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3410048.3410050",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun Jul 12 08:01:19 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3410048.3410050",
abstract = "We consider a general class of finite-horizon online
decision-making problems, where in each period a
controller is presented a stochastic arrival and must
choose an action from a set of permissible actions, and
the final objective depends only on the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wei:2020:OPD,
author = "Xiaohan Wei and Hao Yu and Michael J. Neely",
title = "Online Primal-Dual Mirror Descent under Stochastic
Constraints",
journal = j-SIGMETRICS,
volume = "48",
number = "1",
pages = "3--4",
month = jul,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3410048.3410051",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun Jul 12 08:01:19 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3410048.3410051",
abstract = "We consider online convex optimization with stochastic
constraints where the objective functions are
arbitrarily time-varying and the constraint functions
are independent and identically distributed (i.i.d.)
over time. Both the objective and constraint \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Im:2020:DWF,
author = "Sungjin Im and Benjamin Moseley and Kamesh Munagala
and Kirk Pruhs",
title = "Dynamic Weighted Fairness with Minimal Disruptions",
journal = j-SIGMETRICS,
volume = "48",
number = "1",
pages = "5--6",
month = jul,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3410048.3410052",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun Jul 12 08:01:19 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3410048.3410052",
abstract = "In this paper, we consider the following dynamic fair
allocation problem: Given a sequence of job arrivals
and departures, the goal is to maintain an
approximately fair allocation of the resource against a
target fair allocation policy, while minimizing
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2020:OLO,
author = "Lin Yang and Mohammad H. Hajiesmaili and Ramesh
Sitaraman and Adam Wierman and Enrique Mallada and Wing
S. Wong",
title = "Online Linear Optimization with Inventory Management
Constraints",
journal = j-SIGMETRICS,
volume = "48",
number = "1",
pages = "7--8",
month = jul,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3410048.3410053",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun Jul 12 08:01:19 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3410048.3410053",
abstract = "This paper considers the problem of online linear
optimization with inventory management constraints.
Specifically, we consider an online scenario where a
decision maker needs to satisfy her time-varying demand
for some units of an asset, either from a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2020:OOP,
author = "Yiheng Lin and Gautam Goel and Adam Wierman",
title = "Online Optimization with Predictions and Non-convex
Losses",
journal = j-SIGMETRICS,
volume = "48",
number = "1",
pages = "9--10",
month = jul,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3410048.3410054",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun Jul 12 08:01:19 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3410048.3410054",
abstract = "We study online optimization in a setting where an
online learner seeks to optimize a per-round hitting
cost, which may be nonconvex, while incurring a
movement cost when changing actions between rounds. We
ask: under what general conditions is it \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2020:MDO,
author = "Xiaoqi Tan and Bo Sun and Alberto Leon-Garcia and Yuan
Wu and Danny H. K. Tsang",
title = "Mechanism Design for Online Resource Allocation: a
Unified Approach",
journal = j-SIGMETRICS,
volume = "48",
number = "1",
pages = "11--12",
month = jul,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3410048.3410055",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun Jul 12 08:01:19 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3410048.3410055",
abstract = "This paper concerns the mechanism design for online
resource allocation in a strategic setting. In this
setting, a single supplier allocates capacity-limited
resources to requests that arrive in a sequential and
arbitrary manner. Each request is \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alijani:2020:PMP,
author = "Reza Alijani and Siddhartha Banerjee and Sreenivas
Gollapudi and Kamesh Munagala and Kangning Wang",
title = "Predict and Match: Prophet Inequalities with Uncertain
Supply",
journal = j-SIGMETRICS,
volume = "48",
number = "1",
pages = "13--14",
month = jul,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3410048.3410056",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun Jul 12 08:01:19 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3410048.3410056",
abstract = "We consider the problem of selling perishable items to
a stream of buyers in order to maximize social welfare.
A seller starts with a set of identical items, and each
arriving buyer wants any one item, and has a valuation
drawn i.i.d. from a known \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhattacharjee:2020:FLR,
author = "Rajarshi Bhattacharjee and Subhankar Banerjee and
Abhishek Sinha",
title = "Fundamental Limits on the Regret of Online
Network-Caching",
journal = j-SIGMETRICS,
volume = "48",
number = "1",
pages = "15--16",
month = jul,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3410048.3410057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun Jul 12 08:01:19 MDT 2020",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/abs/10.1145/3410048.3410057",
abstract = "Optimal caching of files in a content distribution
network (CDN) is a problem of fundamental and growing
commercial interest. Although many different caching
algorithms are in use today, the fundamental
performance limits of the network caching \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2020:SIW,
author = "Mark S. Squillante",
title = "Special Issue on {The Workshop on MAthematical
performance Modeling and Analysis (MAMA 2020)}",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "2--2",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439604",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439604",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2020:UGC,
author = "Ping-En Lu and Cheng-Shang Chang",
title = "Using Graph Convolutional Networks to Compute
Approximations of Dominant Eigenvectors",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "3--5",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439605",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439605",
abstract = "Graph Convolutional Networks (GCN) have been very
popular for the network embedding problem that maps
nodes in a graph to vectors in a Euclidean space. In
this short paper, we show that a special class of GCNs
compute approximations of dominant \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
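The Lu and Chang entry above relates a class of GCN layers to approximations of dominant eigenvectors. As the plain numerical reference point (not a GCN), the Python sketch below runs classical power iteration on a small symmetric adjacency matrix; the matrix is an arbitrary assumption.

# Power iteration for a dominant eigenvector of a symmetric adjacency matrix.
import numpy as np

def power_iteration(A, iters=200):
    v = np.ones(A.shape[0]) / np.sqrt(A.shape[0])
    for _ in range(iters):
        v = A @ v
        v /= np.linalg.norm(v)
    return v

if __name__ == "__main__":
    A = np.array([[0.0, 1.0, 1.0],
                  [1.0, 0.0, 1.0],
                  [1.0, 1.0, 0.0]])
    print("dominant eigenvector:", power_iteration(A))   # approx [1, 1, 1] / sqrt(3)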
@Article{Bhattacharjee:2020:CAM,
author = "Rajarshi Bhattacharjee and Abhishek Sinha",
title = "Competitive Algorithms for Minimizing the Maximum
Age-of-Information",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "6--8",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439606",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439606",
abstract = "In this short paper, we consider the problem of
designing a near-optimal competitive scheduling policy
to maximize the freshness of available information
uniformly across N mobile users. Motivated by the
unreliability and non-stationarity of the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Levy:2020:WCA,
author = "Hanoch Levy and Jhonatan Tavori",
title = "Worst Case Attacks on Distributed Resources Systems",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "9--11",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439607",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439607",
abstract = "How should an attacker, who wishes to hurt (deny)
service, attack resources on a geographically
distributed system in order to maximize the damage
inflicted? Should attack efforts focus on a small
number of regions (sites) or rather spread over many
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2020:PAO,
author = "Eitan Bachmat and Sveinung Erland",
title = "Performance analysis, Optimization and Optics",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "12--14",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439608",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439608",
abstract = "We introduce some methods and concepts of optics into
performance analysis and optimization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Datar:2020:RPC,
author = "Mandar Datar and Eitan Altman and Ghilas Ferrat",
title = "Routing into parallel collision channels",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "15--17",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439609",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439609",
abstract = "We study a Medium Access game modeled as a splittable
atomic routing game in a parallel link topology. Each
player has to decide how to split her traffic among the
links. We take the expected loss probability of a
player as her cost and consider various \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fricker:2020:MFA,
author = "Christine Fricker and Hanene Mohamed and Cedric
Bourdais",
title = "A mean field analysis of a stochastic model for
reservation in car-sharing systems",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "18--20",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439610",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439610",
abstract = "Over the past decade, vehicle-sharing systems have
appeared as a new answer to mobility challenges, like
reducing congestion, pollution, or travel time in
numerous cities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haverkort:2020:MLD,
author = "Boudewijn R. Haverkort and Felix Finkbeiner and
Pieter-Tjerk de Boer",
title = "Machine Learning Data Center Workloads Using
Generative Adversarial Networks",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "21--23",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439611",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439611",
abstract = "In this paper we study the applicability of generative
adversarial networks (GANs) for the description and
generation of workloads for data centers. GANs are
advanced neural networks that can learn complex
likelihood functions and can sample from them.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasche:2020:CTO,
author = "Daniel S. Menasch{\'e} and Mark Shifrin and Eduardo
Hargreaves",
title = "Caches and Timelines Operate Under Heavy Traffic",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "24--26",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439612",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439612",
abstract = "The heavy traffic regime is a regime wherein system
resources are always busy. As caches and social network
timelines are intrinsically always busy, i.e., their
space-shared resources are always utilized, the goal of
this paper is to evaluate the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kesidis:2020:TGQ,
author = "George Kesidis and Takis Konstantopoulos",
title = "{TB/GI/1} queues with arrival traffic envelopes",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "27--29",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439613",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439613",
abstract = "Consider a queueing system where the job service times
are not known upon arrival; e.g., a transmission server
of a wireless channel where packet transmission times
are random, or a virtual machine handling a stream of
tasks whose execution times are \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jaleel:2020:GPD,
author = "Jazeem Abdul Jaleel and Alexander Wickeham and Sherwin
Doroudi and Kristen Gardner",
title = "A General {``Power-of-$d$}'' Dispatching Framework for
Heterogeneous Systems",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "30--32",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439614",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439614",
abstract = "Large-scale systems are everywhere, and deciding how
to dispatch an arriving job to one of the many
available servers is crucial to obtaining low response
time. One common scalable dispatching paradigm is the
``power of d,'' in which the dispatcher \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scully:2020:OMS,
author = "Ziv Scully and Isaac Grosof and Mor Harchol-Balter",
title = "Optimal Multiserver Scheduling with Unknown Job Sizes
in Heavy Traffic",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "33--35",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439615",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439615",
abstract = "We consider scheduling to minimize mean response time
of the M/G/k queue with unknown job sizes. In the
single-server k = 1 case, the optimal policy is the
Gittins policy, but it is not known whether Gittins or
any other policy is optimal in the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2020:ODC,
author = "Yingdong Lu and Mark S. Squillante and Tonghoon Suk",
title = "Optimal Delay-Cost Scheduling Control in Fluid Models
of General $ n \times n $ Input-Queued Switches",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "36--38",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439616",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439616",
abstract = "Input-queued switch (IQS) architectures are widely
used in modern computer/communication networks. The
optimal scheduling control of these high-speed,
low-latency networks is critical for our understanding
of fundamental design and performance issues \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Araldo:2020:ASI,
author = "Andrea Araldo and Ivona Brandic and Stefan Schmid",
title = "{ACM SIGMETRICS} International Workshop on Distributed
Cloud Computing {(DCC)}",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "39--40",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439617",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439617",
abstract = "The International Workshop on Distributed Cloud
Computing (DCC) is an interdisciplinary forum on
distributed systems, algorithms as well as networking
and cloud computing. DCC 2020 was co-located with
SIGMETRICS 2020, in the week of June 8-12, 2020. The
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balouek-Thomert:2020:HCC,
author = "Daniel Balouek-Thomert and Ivan Rodero and Manish
Parashar",
title = "Harnessing the Computing Continuum for Urgent
Science",
journal = j-SIGMETRICS,
volume = "48",
number = "2",
pages = "41--46",
month = nov,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3439602.3439618",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Feb 5 17:12:28 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3439602.3439618",
abstract = "Urgent science describes time-critical, data-driven
scientific workflows that can leverage distributed
data sources in a timely way to facilitate important
decision making. While our capacity for generating data
is expanding dramatically, our ability \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shi:2020:MRP,
author = "Lianjie Shi and Xin Wang and Richard T. B. Ma",
title = "On Multi-Resource Procurement in {Internet} Access
Markets: Optimal Strategies and Market Equilibrium",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "4--5",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453954",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453954",
abstract = "With the increasing popularity and significance of
content delivery services, especially video streaming,
stringent Quality of Service (QoS) requirements have
been placed upon Internet content providers (CPs). As a
result, CPs have strong incentives to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hoque:2020:NAP,
author = "Mohammad A. Hoque and Ashwin Rao and Sasu Tarkoma",
title = "Network and Application Performance Measurement
Challenges on {Android} Devices",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "6--11",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453955",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453955",
abstract = "Modern mobile systems are optimized for
energy-efficient computation and communications, and
these optimizations affect the way they use the
network, and thus the performance of the applications.
Therefore, understanding network and application
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scherrer:2020:ISP,
author = "Simon Scherrer and Markus Legner and Adrian Perrig and
Stefan Schmid",
title = "Incentivizing Stable Path Selection in Future
{Internet} Architectures",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "12--13",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453956",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453956",
abstract = "By delegating path control to end-hosts, future
Internet architectures offer flexibility for path
selection. However, a concern arises that the
distributed routing decisions by end-hosts, in
particular load-adaptive routing, can lead to
oscillations if \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Christensen:2020:LIL,
author = "Niels Christensen and Mark Glavind and Stefan Schmid
and Ji{\v{r}}{\'\i} Srba",
title = "Latte: Improving the Latency of Transiently Consistent
Network Update Schedules",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "14--26",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453957",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453957",
abstract = "Emerging software-defined and programmable networking
technologies enable more adaptive communication
infrastructures. However, leveraging these
flexibilities and operating networks more adaptively is
challenging, as the underlying infrastructure
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bronzino:2020:ISV,
author = "Francesco Bronzino and Paul Schmitt and Sara Ayoubi
and Guilherme Martins and Renata Teixeira and Nick
Feamster",
title = "Inferring Streaming Video Quality from Encrypted
Traffic: Practical Models and Deployment Experience",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "27--32",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453958",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453958",
abstract = "Inferring the quality of streaming video applications
is important for Internet service providers, but the
fact that most video streams are encrypted makes it
difficult to do so. We develop models that infer quality
metrics (i.e., startup delay and \ldots{})",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hurtado-Lange:2020:HTA,
author = "Daniela Hurtado-Lange and Siva Theja Maguluri",
title = "Heavy-traffic Analysis of the Generalized Switch under
Multidimensional State Space Collapse",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "33--34",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453959",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453959",
abstract = "Stochastic Processing Networks that model wired and
wireless networks, and other queueing systems, have
been studied in the heavy-traffic limit under the so-called
Complete Resource Pooling (CRP) condition. When the CRP
condition is not satisfied, heavy-traffic \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berg:2020:HPS,
author = "Benjamin Berg and Rein Vesilo and Mor Harchol-Balter",
title = "{heSRPT}: Parallel Scheduling to Minimize Mean
Slowdown",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "35--36",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453960",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453960",
abstract = "Modern data centers serve workloads which can exploit
parallelism. When a job parallelizes across multiple
servers it completes more quickly. However, it is
unclear how to share a limited number of servers
between many parallelizable jobs. In this paper
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gardner:2020:SLB,
author = "Kristen Gardner and Jazeem Abdul Jaleel and Alexander
Wickeham and Sherwin Doroudi",
title = "Scalable Load Balancing in the Presence of
Heterogeneous Servers",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "37--38",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453961",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453961",
abstract = "In large-scale computer systems, deciding how to
dispatch arriving jobs to servers is a primary factor
affecting system performance. Consequently, there is a
wealth of literature on designing, analyzing, and
evaluating the performance of load balancing \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dai:2020:LOR,
author = "Wenkai Dai and Klaus-Tycho Foerster and David
Fuchssteiner and Stefan Schmid",
title = "Load-Optimization in Reconfigurable Networks:
Algorithms and Complexity of Flow Routing",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "39--44",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453962",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453962",
abstract = "Emerging reconfigurable data centers introduce the
unprecedented flexibility in how the physical layer can
be programmed to adapt to current traffic demands.
These reconfigurable topologies are commonly hybrid,
consisting of static and reconfigurable \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vardoyan:2020:CRB,
author = "Gayane Vardoyan and Saikat Guha and Philippe Nain and
Don Towsley",
title = "On the Capacity Region of Bipartite and Tripartite
Entanglement Switching",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "45--50",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453963",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "See erratum \cite{Vardoyan:2021:CRB}.",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453963",
abstract = "We study a quantum switch serving a set of users in a
star topology. The function of the switch is to create
bipartite or tripartite entangled state among users at
the highest possible rates at a fixed ratio. We model a
set of randomized switching \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Panigrahy:2020:ASC,
author = "Nitish K. Panigrahy and Prithwish Basu and Don Towsley
and Ananthram Swami and Kin K. Leung",
title = "On the Analysis of Spatially Constrained Power of Two
Choice Policies",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "51--56",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453964",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453964",
abstract = "We consider a class of power of two choice based
assignment policies for allocating users to servers,
where both users and servers are located on a
two-dimensional Euclidean plane. In this framework, we
investigate the inherent tradeoff between the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2020:AOLb,
author = "Xingyu Zhou and Ness Shroff and Adam Wierman",
title = "Asymptotically Optimal Load Balancing in Large-scale
Heterogeneous Systems with Multiple Dispatchers",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "57--58",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453965",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453965",
abstract = "We consider the load balancing problem in large-scale
heterogeneous systems with multiple dispatchers. We
introduce a general framework called
Local-Estimation-Driven (LED). Under this framework,
each dispatcher keeps local (possibly outdated)
estimates \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Niu:2020:IAB,
author = "Jianyu Niu and Ziyu Wang and Fangyu Gai and Chen
Feng",
title = "Incentive Analysis of {Bitcoin-NG}, Revisited",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "59--60",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453966",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453966",
abstract = "Bitcoin-NG is among the first scalable blockchain
protocols by decoupling blockchain operation into two
planes: leader election and transaction serialization.
Its decoupling idea has inspired a new generation of
blockchain protocols. However, the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vaze:2020:NSS,
author = "Rahul Vaze and Jayakrishnan Nair",
title = "Network Speed Scaling",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "61--62",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453967",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453967",
abstract = "Speed scaling for a network of servers represented by
a directed acyclic graph is considered. Jobs arrive at
a source server, with a specified destination server,
and are defined to be complete once they are processed
by all servers on any feasible path \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pokhrel:2020:RSF,
author = "Shiva Raj Pokhrel and Carey Williamson",
title = "A Rent-Seeking Framework for Multipath {TCP}",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "63--70",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453968",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453968",
abstract = "Network utility maximization (NUM) for Multipath TCP
(MPTCP) is a challenging task, since there is no
well-defined utility function for MPTCP [6]. In this
paper, we identify the conditions under which we can
use Kelly's NUM mechanism, and explicitly \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meng:2020:SWQ,
author = "Jingfan Meng and Long Gong and Jun (Jim) Xu",
title = "Sliding-Window {QPS (SW-QPS)}: a Perfect Parallel
Iterative Switching Algorithm for Input-Queued
Switches",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "71--76",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453969",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453969",
abstract = "In this work, we first propose a parallel batch
switching algorithm called Small-Batch
Queue-Proportional Sampling (SB-QPS). Compared to other
batch switching algorithms, SB-QPS significantly
reduces the batch size without sacrificing the
throughput \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Quan:2020:PCM,
author = "Guocong Quan and Atilla Eryilmaz and Jian Tan and Ness
Shroff",
title = "Prefetching and Caching for Minimizing Service Costs:
Optimal and Approximation Strategies",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "77--78",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453970",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453970",
abstract = "In practice, prefetching data strategically has been
used to improve caching performance. The idea is that
data items can either be cached upon request
(traditional approach) or prefetched into the cache
before the requests actually occur. The caching
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vardoyan:2020:EAI,
author = "Gayane Vardoyan and Saikat Guha and Philippe Nain and
Don Towsley",
title = "On the Exact Analysis of an Idealized Quantum Switch",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "79--80",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453971",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453971",
abstract = "Protocols that exploit quantum communication
technology offer two advantages: they can either extend
or render feasible the capabilities of their classical
counterparts, or they exhibit functionality entirely
unachievable through classical means alone. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gilman:2020:DPP,
author = "Guin Gilman and Samuel S. Ogden and Tian Guo and
Robert J. Walls",
title = "Demystifying the Placement Policies of the {NVIDIA
GPU} Thread Block Scheduler for Concurrent Kernels",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "81--88",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453972",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453972",
abstract = "In this work, we empirically derive the scheduler's
behavior under concurrent workloads for NVIDIA's
Pascal, Volta, and Turing microarchitectures. In
contrast to past studies that suggest the scheduler
uses a round-robin policy to assign thread blocks
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2020:FCN,
author = "Yuezhou Liu and Yuanyuan Li and Qian Ma and Stratis
Ioannidis and Edmund Yeh",
title = "Fair Caching Networks",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "89--90",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453973",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453973",
abstract = "We study fair content allocation strategies in caching
networks through a utility-driven framework, where each
request achieves a utility of its caching gain rate.
The resulting problem is NP-hard. Submodularity allows
us to devise a deterministic \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shioda:2020:DCB,
author = "Shigeo Shioda",
title = "Distribution of Consensus in a Broadcasting-based
Consensus-forming Algorithm",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "91--96",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453974",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453974",
abstract = "The consensus achieved in the consensus-forming
algorithm is not generally a constant but rather a
random variable, even if the initial opinions are the
same. In the present paper, we investigate the
statistical properties of the consensus in a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elahi:2020:FSM,
author = "Maryam Elahi and Andrea Marin and Sabina Rossi and
Carey Williamson",
title = "Frequency scaling in multilevel queues",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "97--98",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453975",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453975",
abstract = "In this paper, we study a variant of PS+PS multilevel
scheduling, which we call the PS+IS queue.
Specifically, we use Processor Sharing (PS) at both
queues, but with linear frequency scaling on the second
queue, so that the latter behaves like an \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bienkowski:2020:ODB,
author = "Marcin Bienkowski and David Fuchssteiner and Jan
Marcinkowski and Stefan Schmid",
title = "Online Dynamic {B}-Matching: With Applications to
Reconfigurable Datacenter Networks",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "99--108",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453976",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453976",
abstract = "This paper initiates the study of online algorithms
for the maximum weight b-matching problem, a
generalization of maximum weight matching where each
node has at most $b \geq 1$ adjacent matching edges. The
problem is motivated by emerging optical technologies
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huang:2020:HTA,
author = "Yu Huang and Longbo Huang",
title = "Heavy Traffic Analysis of Approximate Max-Weight
Matching Algorithms for Input-Queued Switches",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "109--110",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453977",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453977",
abstract = "In this paper, we propose a class of approximation
algorithms for max-weight matching (MWM) policy for
input-queued switches, called expected 1-APRX. We
establish the state space collapse (SSC) result for
expected 1-APRX, and characterize its queue \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Malik:2020:RSI,
author = "Fehmina Malik and Manjesh K. Hanawal and Yezekael
Hayel and Jayakrishnan Nair",
title = "Revenue sharing on the {Internet}: a Case for Going
Soft on Neutrality Regulations",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "111--112",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453978",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453978",
abstract = "Revenue sharing contracts between Content Providers
(CPs) and Internet Service Providers (ISPs) can act as
leverage for enhancing the infrastructure of the
Internet. ISPs can be incentivised to make investments
in network infrastructure that improve \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pourghassemi:2020:ORS,
author = "Behnam Pourghassemi and Ardalan Amiri Sani and Aparna
Chandramowlishwaran",
title = "Only Relative Speed Matters: Virtual Causal
Profiling",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "113--119",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453979",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453979",
abstract = "Causal profiling is a novel and powerful profiling
technique that quantifies the potential impact of
optimizing a code segment on the program runtime. A key
application of causal profiling is to analyze what-if
scenarios which typically require a large \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2020:RIP,
author = "Myungsuk Kim and Myoungjun Chun and Duwon Hong and
Yoona Kim and Geonhee Cho and Dusol Lee and Jihong
Kim",
title = "{RealWear}: Improving Performance and Lifetime of
{SSDs} Using a {NAND} Aging Marker",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "120--121",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453980",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453980",
abstract = "NAND flash memory has revolutionized how we manage
data in modern digital systems, but significant
improvements are needed in flash-based storage systems
to meet the requirements of emerging data-intensive
applications. In this paper, we address the problem
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2020:OCF,
author = "Yingdong Lu and Mark S. Squillante and Tonghoon Suk",
title = "Optimal Control in Fluid Models of $ n \times n $
Input-Queued Switches under Linear Fluid-Flow Costs",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "122--127",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453981",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453981",
abstract = "We consider a fluid model of n x n input-queued
switches with associated fluid-flow costs and derive an
optimal scheduling control policy to an infinite
horizon discounted control problem with a general
linear objective function of fluid cost. Our \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kar:2020:TOL,
author = "Sounak Kar and Robin Rehrmann and Arpan Mukhopadhyay
and Bastian Alt and Florin Ciucu and Heinz Koeppl and
Carsten Binnig and Amr Rizk",
title = "On the Throughput Optimization in Large-Scale
Batch-Processing Systems",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "128--129",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453982",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453982",
abstract = "We analyze a data-processing system with n clients
producing jobs which are processed in batches by m
parallel servers; the system throughput critically
depends on the batch size and a corresponding
sub-additive speedup function that arises due to
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bayat:2020:ZRN,
author = "Niloofar Bayat and Richard Ma and Vishal Misra and Dan
Rubenstein",
title = "Zero-Rating and Net Neutrality: Who Wins, Who Loses?",
journal = j-SIGMETRICS,
volume = "48",
number = "3",
pages = "130--135",
month = dec,
year = "2020",
CODEN = "????",
DOI = "https://doi.org/10.1145/3453953.3453983",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Mar 6 08:32:44 MST 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3453953.3453983",
abstract = "An objective of network neutrality is to design
regulations for the Internet and ensure that it remains
a public, open platform where innovations can thrive.
While there is broad agreement that preserving the
content quality of service falls under the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Knottenbelt:2021:MC,
author = "William Knottenbelt and Katinka Wolter",
title = "Message from the Chairs",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "2--2",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466828",
abstract = "This volume presents the proceedings of the 2nd
Symposium of Cryptocurrency Analysis (SOCCA 2020),
originally scheduled to be held in Milan, Italy, on
November 6, 2020. The COVID-19 pandemic has
necessitated, in common with many other conferences,
that \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weber:2021:KAD,
author = "Ingo Weber",
title = "{Keynote}: Analysing Data from Blockchains",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "3--3",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466829",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466829",
abstract = "Blockchain is a novel distributed ledger technology.
Through its features and smart contract capabilities, a
wide range of application areas opened up for
blockchain-based innovation [5]. In order to analyse
how concrete blockchain systems as well as \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coutinho:2021:CHI,
author = "Felipe Ribas Coutinho and Victor Pires and Claudio
Miceli and Daniel S. Menasch{\'e}",
title = "Crypto-Hotwire: Illegal Blockchain Mining at Zero Cost
Using Public Infrastructures",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "4--7",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466830",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466830",
abstract = "Blockchains and cryptocurrencies disrupted the
conversion of energy into a medium of exchange.
Numerous applications for blockchains and
cryptocurrencies are now envisioned for purposes
ranging from inventory control to banking applications.
Naturally, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Simoes:2021:BPT,
author = "Jefferson E. Simoes and Eduardo Ferreira and Daniel S.
Menasch{\'e} and Carlos A. V. Campos",
title = "Blockchain Privacy Through Merge Avoidance and Mixing
Services: a Hardness and an Impossibility Result",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "8--11",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466831",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466831",
abstract = "Cryptocurrencies typically aim at preserving the
privacy of their users. Different cryptocurrencies
preserve privacy at various levels, some of them
requiring users to rely on strategies to raise the
privacy level to their needs. Among those strategies,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Oliveira:2021:ATC,
author = "Vinicius C. Oliveira and Julia Almeida Valadares and
Jose Eduardo A. Sousa and Alex Borges Vieira and Heder
Soares Bernardino and Saulo Moraes Villela and Glauber
Dias Goncalves",
title = "Analyzing Transaction Confirmation in {Ethereum} Using
Machine Learning Techniques",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "12--15",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466832",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466832",
abstract = "Ethereum has emerged as one of the most important
cryptocurrencies in terms of the number of
transactions. Given the recent growth of Ethereum, the
cryptocurrency community and researchers are interested
in understanding the Ethereum transactions \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gundlach:2021:PCT,
author = "Rowel Gundlach and Martijn Gijsbers and David Koops
and Jacques Resing",
title = "Predicting confirmation times of Bitcoin
transactions",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "16--19",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466833",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466833",
abstract = "We study the distribution of confirmation times of
Bitcoin transactions, conditional on the size of the
current memory pool. We argue that the time until a
Bitcoin transaction is confirmed resembles the time to
ruin in a corresponding Cramer--Lundberg \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stoepker:2021:RAB,
author = "Ivo Stoepker and Rowel Gundlach and Stella
Kapodistria",
title = "Robustness analysis of Bitcoin confirmation times",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "20--23",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466834",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466834",
abstract = "Bitcoin payments require a random amount of time to
get confirmed (i.e. to be grouped by the miners into a
block and to be added to the Bitcoin blockchain). In
[8, 11], the authors propose the modelling of the
Bitcoin confirmation time by the so-called \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sousa:2021:FUP,
author = "Jose Eduardo A. Sousa and Vinicius C. Oliveira and
Julia Almeida Valadares and Alex Borges Vieira and
Heder S. Bernardino and Saulo Moraes Villela and
Glauber Dias Goncalves",
title = "Fighting Under-price {DoS} Attack in {Ethereum} with
Machine Learning Techniques",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "24--27",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466835",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466835",
abstract = "Ethereum is one of the most popular cryptocurrency
currently and it has been facing security threats and
attacks. As a consequence, Ethereum users may
experience long periods to validate transactions.
Despite the maintenance on the Ethereum mechanisms,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vassio:2021:MOW,
author = "Luca Vassio and Zhi-Li Zhang and Danilo Giordano and
Abhishek Chandra",
title = "Message from the organizers of {WAIN}",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "28--28",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466837",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466837",
abstract = "We are pleased to welcome you to the 2nd Workshop on
AI in Networks and Distributed Systems. This year we
have expanded the scope of the workshop to include
applications of Machine Learning and AI not merely in
Networking, but also in Distributed \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hossen:2021:MTO,
author = "Md Rajib Hossen and Mohammad A. Islam",
title = "Mobile Task Offloading Under Unreliable Edge
Performance",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "29--32",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466838",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466838",
abstract = "Offloading resource-hungry tasks from mobile devices
to an edge server has been explored recently to improve
task completion time as well as to save battery energy.
The low-latency computing resources from edge servers
are a perfect companion to realize \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Celenk:2021:MLB,
author = "{\"O}zge Celenk and Thomas Bauschert and Marcus
Eckert",
title = "Machine Learning based {KPI} Monitoring of Video
Streaming Traffic for {QoE} Estimation",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "33--36",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466839",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466839",
abstract = "Quality of Experience (QoE) monitoring of video
streaming traffic is a crucial task for service
providers. Nowadays it is challenging due to the
increased usage of end-to-end encryption. In order to
overcome this issue, machine learning (ML) approaches
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wehner:2021:IWQ,
author = "Nikolas Wehner and Michael Seufert and Joshua Schuler
and Sarah Wassermann and Pedro Casas and Tobias
Hossfeld",
title = "Improving {Web} {QoE} Monitoring for Encrypted Network
Traffic through Time Series Modeling",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "37--40",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466840",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/cryptography2020.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466840",
abstract = "This paper addresses the problem of Quality of
Experience (QoE) monitoring for web browsing. In
particular, the inference of common Web QoE metrics
such as Speed Index (SI) is investigated. Based on a
large dataset collected with open web-measurement
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Markudova:2021:WMA,
author = "Dena Markudova and Martino Trevisan and Paolo Garza
and Michela Meo and Maurizio M. Munaf{\`o} and Giovanna
Carofiglio",
title = "What's my App?: {ML}-based classification of {RTC}
applications",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "41--44",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466841",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466841",
abstract = "With the spread of broadband Internet, Real-Time
Communication (RTC) platforms have become increasingly
popular and have transformed the way people
communicate. Thus, it is fundamental that the network
adopts traffic management policies that ensure
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Higuchi:2021:FLP,
author = "Shunsuke Higuchi and Junji Takemasa and Yuki Koizumi
and Atsushi Tagami and Toru Hasegawa",
title = "Feasibility of {Longest Prefix Matching} using
{Learned Index Structures}",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "45--48",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466842",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/string-matching.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466842",
abstract = "This paper revisits longest prefix matching in IP
packet forwarding because an emerging data structure,
the learned index, has recently been presented. A learned index
uses machine learning to associate key-value pairs in a
key-value store. The fundamental idea \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gonzalez:2021:UGM,
author = "Gast{\'o}n Garc{\'\i}a Gonz{\'a}lez and Pedro Casas
and Alicia Fern{\'a}ndez and Gabriel G{\'o}mez",
title = "On the Usage of Generative Models for Network Anomaly
Detection in Multivariate Time-Series",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "49--52",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466843",
abstract = "Despite the many attempts and approaches for anomaly
detection explored over the years, the automatic
detection of rare events in data communication networks
remains a complex problem. In this paper we introduce
Net-GAN, a novel approach to network \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marin:2021:CCQ,
author = "Andrea Marin and Carey Williamson",
title = "Cheating at Craps: a Quantitative Analysis",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "53--61",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466845",
abstract = "Craps is a simple dice game that is popular in casinos
around the world. While the rules for Craps, and its
mathematical analysis, are reasonably straightforward,
this paper instead focuses on the best ways to cheat at
Craps, by using loaded (biased) \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
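
Editorial note on the entry above (Marin and Williamson, ``Cheating at
Craps''): the abstract concerns quantitative analysis of play with loaded
dice. As a minimal, hedged illustration of the kind of computation
involved (not the authors' model or code; the single-face bias
parameterization below is an assumption for illustration only), a short
Python sketch estimating the pass-line win probability with a biased die:

# Minimal Monte Carlo sketch: pass-line win probability at craps when one
# die face (6) is given probability bias_six (assumed bias model).
import random

def roll(bias_six=1/6):
    faces = [1, 2, 3, 4, 5, 6]
    weights = [(1 - bias_six) / 5] * 5 + [bias_six]
    return random.choices(faces, weights=weights)[0]

def pass_line_win_rate(n_games=100_000, bias_six=1/6):
    wins = 0
    for _ in range(n_games):
        total = roll(bias_six) + roll(bias_six)
        if total in (7, 11):        # natural: immediate win
            wins += 1
        elif total in (2, 3, 12):   # craps: immediate loss
            continue
        else:                       # a point is established
            point = total
            while True:
                total = roll(bias_six) + roll(bias_six)
                if total == point:
                    wins += 1
                    break
                if total == 7:
                    break
    return wins / n_games

# Fair dice give roughly 0.493; changing bias_six shifts the edge.
print(pass_line_win_rate())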
@Article{Masetti:2021:EMM,
author = "Giulio Masetti and Silvano Chiaradonna and Felicita
{Di Giandomenico} and William H. Sanders and Brett
Feddersen",
title = "Extending the {M{\"o}bius} Modeling Environment with
the Advanced Replication Operator",
journal = j-SIGMETRICS,
volume = "48",
number = "4",
pages = "62--67",
month = may,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3466826.3466846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu May 20 08:57:00 MDT 2021",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3466826.3466846",
abstract = "M{\"o}bius is well known as a modeling and evaluation
environment for performance and dependability
indicators. It has been conceived in a modular and
flexible fashion, to be easily expanded to incorporate
new features, formalisms and tools. The need of
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arora:2021:OBB,
author = "Sanjeev Arora",
title = "Opening the {Black Box} of Deep Learning: Some Lessons
and Take-aways",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "1--1",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453910",
abstract = "Deep learning has rapidly come to dominate AI and
machine learning in the past decade. These successes
have come despite deep learning largely being a ``black
box.'' A small subdiscipline has grown up trying to
derive \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2021:FBG,
author = "Zhaowei Zhu and Jingxuan Zhu and Ji Liu and Yang Liu",
title = "Federated Bandit: a Gossiping Approach",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "3--4",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453919",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453919",
abstract = "We study Federated Bandit, a decentralized Multi-Armed
Bandit (MAB) problem with a set of N agents, who can
only communicate their local data with neighbors
described by a connected graph G. Each agent makes a
sequence \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
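
Note on the entry above (Zhu et al., ``Federated Bandit''): the abstract
describes agents that share local data only with graph neighbors. Below is
a hedged sketch of the gossip-averaging primitive such schemes build on (a
generic illustration, not the paper's algorithm; the mixing matrix W is an
assumed example), in Python:

# One synchronous gossip round: each agent replaces its estimate with a
# weighted average of its neighbors' estimates (W doubly stochastic).
import numpy as np

def gossip_round(x, W):
    return W.dot(x)

# Example: 4 agents on a ring with self-loops.
W = np.array([[0.50, 0.25, 0.00, 0.25],
              [0.25, 0.50, 0.25, 0.00],
              [0.00, 0.25, 0.50, 0.25],
              [0.25, 0.00, 0.25, 0.50]])
x = np.array([1.0, 0.0, 0.0, 0.0])  # only agent 0 has observed a reward
for _ in range(50):
    x = gossip_round(x, W)
print(x)  # every entry approaches the global average 0.25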
@Article{Cuvelier:2021:SEP,
author = "Thibaut Cuvelier and Richard Combes and Eric Gourdin",
title = "Statistically Efficient, Polynomial-Time Algorithms
for Combinatorial Semi-Bandits",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "5--6",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453926",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453926",
abstract = "We consider combinatorial semi-bandits over a set $X
\subset \{0,1\}^d$ where rewards are uncorrelated across
items. For this problem, the algorithm ESCB yields the
smallest known regret bound $R(T) = O( d (\ln m)^2 (\ln
T) / \ldots{} )$",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2021:IAC,
author = "Tongxin Li and Yue Chen and Bo Sun and Adam Wierman
and Steven Low",
title = "Information Aggregation for Constrained Online
Control",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "7--8",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3461737",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3461737",
abstract = "We consider a two-controller online control problem
where a central controller chooses an action from a
feasible set that is determined by time-varying and
coupling constraints, which depend on all past actions
and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buchbinder:2021:OVM,
author = "Niv Buchbinder and Yaron Fairstein and Konstantina
Mellou and Ishai Menache and Joseph (Seffi) Naor",
title = "Online Virtual Machine Allocation with Lifetime and
Load Predictions",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "9--10",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456278",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib;
https://www.math.utah.edu/pub/tex/bib/virtual-machines.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456278",
abstract = "The cloud computing industry has grown rapidly over
the last decade, and with this growth there is a
significant increase in demand for compute resources.
Demand is manifested in the form of Virtual Machine
(VM) \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2021:NSI,
author = "Isaac Grosof and Kunhe Yang and Ziv Scully and Mor
Harchol-Balter",
title = "Nudge: Stochastically Improving upon {FCFS}",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "11--12",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460102",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460102",
abstract = "The First-Come First-Served (FCFS) scheduling policy
is the most popular scheduling algorithm used in
practice. Furthermore, its usage is theoretically
validated: for light-tailed job size distributions,
FCFS has weakly \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2021:ZQM,
author = "Weina Wang and Qiaomin Xie and Mor Harchol-Balter",
title = "Zero Queueing for Multi-Server Jobs",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "13--14",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453924",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453924",
abstract = "Cloud computing today is dominated by multi-server
jobs. These are jobs that request multiple servers
simultaneously and hold onto all of these servers for
the duration of the job. Multi-server jobs add a lot of
complexity to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scully:2021:GPN,
author = "Ziv Scully and Isaac Grosof and Mor Harchol-Balter",
title = "The {Gittins Policy} is Nearly Optimal in the
{M/G/$k$} under Extremely General Conditions",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "15--16",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456281",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456281",
abstract = "The Gittins scheduling policy minimizes the mean
response time in the single-server M/G/1 queue in a wide
variety of settings. Most famously, Gittins is optimal
when preemption is allowed and service requirements
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
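
Note on the entry above (Scully, Grosof, and Harchol-Balter): for readers
unfamiliar with the Gittins policy, a commonly used form of its index for
a job with (unknown) size $S$ and attained service $a$ in the M/G/1
setting is
$$ G(a) \;=\; \sup_{\Delta > 0}
   \frac{\Pr[S - a \le \Delta \mid S > a]}
        {\mathrm{E}[\min(S - a, \Delta) \mid S > a]}, $$
and the policy serves the job of highest index. This formula is an
editorial gloss under standard assumptions, not text from the cited
abstract.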
@Article{Bijlani:2021:WDM,
author = "Ashish Bijlani and Umakishore Ramachandran and Roy
Campbell",
title = "Where did my {256 GB} go? {A} Measurement Analysis of
Storage Consumption on Smart Mobile Devices",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "17--18",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460108",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460108",
abstract = "This work presents the first-ever detailed and
large-scale measurement analysis of storage consumption
behavior of applications (apps) on smart mobile
devices. We start by carrying out a five-year
longitudinal static \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2021:MSW,
author = "Yue Zhang and Bayan Turkistani and Allen Yuqing Yang
and Chaoshun Zuo and Zhiqiang Lin",
title = "A Measurement Study of {Wechat} Mini-Apps",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "19--20",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460106",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460106",
abstract = "A new mobile computing paradigm, dubbed mini-app, has
been growing rapidly over the past few years since
being introduced by WeChat in 2017. In this paradigm, a
host app allows its end-users to install and run
mini-apps \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singh:2021:PNP,
author = "Rachee Singh and David Tench and Phillipa Gill and
Andrew McGregor",
title = "{PredictRoute}: a Network Path Prediction Toolkit",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "21--22",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460107",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460107",
abstract = "Accurate prediction of network paths between arbitrary
hosts on the Internet is of vital importance for
network operators, cloud providers, and academic
researchers. We present PredictRoute, a system that
predicts \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akbari:2021:LBC,
author = "Iman Akbari and Mohammad A. Salahuddin and Leni Ven
and Noura Limam and Raouf Boutaba and Bertrand Mathieu
and Stephanie Moteau and Stephane Tuffin",
title = "A Look Behind the Curtain: Traffic Classification in
an Increasingly Encrypted {Web}",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "23--24",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453921",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/cryptography2020.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453921",
abstract = "Traffic classification is essential in network
management for operations ranging from capacity
planning, performance monitoring, volumetry, and
resource provisioning, to anomaly detection and
security. Recently, it has \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weng:2021:AZA,
author = "Wentao Weng and Weina Wang",
title = "Achieving Zero Asymptotic Queueing Delay for Parallel
Jobs",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "25--26",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456268",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456268",
abstract = "Zero queueing delay is highly desirable in large-scale
computing systems. Existing work has shown that it can
be asymptotically achieved by using the celebrated
Power-of-d-choices (Pod) policy with a probe overhead d
= \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raaijmakers:2021:ASR,
author = "Youri Raaijmakers and Sem Borst",
title = "Achievable Stability in Redundancy Systems",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "27--28",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456267",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456267",
abstract = "We investigate the achievable stability region for
redundancy systems and a quite general workload model
with different job types and heterogeneous servers,
reflecting job-server affinity relations which may
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kielanski:2021:AIS,
author = "Grzegorz Kielanski and Benny {Van Houdt}",
title = "On the Asymptotic Insensitivity of the Supermarket
Model in Processor Sharing Systems",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "29--30",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460100",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460100",
abstract = "The supermarket model is a popular load balancing
model where each incoming job is assigned to a server
with the least number of jobs among d randomly selected
servers. Several authors have shown that the large
scale \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
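
Note on the entry above (Kielanski and Van Houdt): the abstract defines
the supermarket model, in which each arrival joins the shortest of d
randomly sampled queues. A minimal, hedged Python sketch of that
assignment rule alone (illustrative only; it does not reproduce the
paper's processor-sharing analysis):

# Power-of-d assignment: sample d distinct servers, join the least loaded.
import random

def power_of_d_assign(queue_lengths, d=2):
    candidates = random.sample(range(len(queue_lengths)), d)
    return min(candidates, key=lambda i: queue_lengths[i])

queues = [3, 0, 5, 2, 4, 1]
chosen = power_of_d_assign(queues, d=2)
queues[chosen] += 1
print(chosen, queues)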
@Article{Randone:2021:RMF,
author = "Francesca Randone and Luca Bortolussi and Mirco
Tribastone",
title = "Refining Mean-field Approximations by Dynamic State
Truncation",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "31--32",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460099",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460099",
abstract = "Mean-field models are an established method to analyze
large stochastic systems with N interacting objects by
means of simple deterministic equations that are
asymptotically correct when N tends to infinity.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gao:2021:TCC,
author = "Bingyu Gao and Haoyu Wang and Pengcheng Xia and Siwei
Wu and Yajin Zhou and Xiapu Luo and Gareth Tyson",
title = "Tracking Counterfeit Cryptocurrency End-to-end",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "33--34",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456282",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456282",
abstract = "With the growth of the cryptocurrency ecosystem, there
is expanding evidence that counterfeit cryptocurrency
has also appeared. In this paper, we empirically
explore the presence of counterfeit cryptocurrencies on
Ethereum and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2021:SDC,
author = "Weimin Chen and Xinran Li and Yuting Sui and Ningyu He
and Haoyu Wang and Lei Wu and Xiapu Luo",
title = "{SADPonzi}: Detecting and Characterizing {Ponzi}
Schemes in {Ethereum} Smart Contracts",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "35--36",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460105",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460105",
abstract = "Ponzi schemes are financial scams that lure users
under the promise of high profits. With the prosperity
of Bitcoin and blockchain technologies, there has been
growing anecdotal evidence that this classic fraud has
emerged in \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pourghassemi:2021:ACP,
author = "Behnam Pourghassemi and Jordan Bonecutter and Zhou Li
and Aparna Chandramowlishwaran",
title = "{adPerf}: Characterizing the Performance of
Third-party Ads",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "37--38",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453920",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453920",
abstract = "Online advertising (essentially display ads on
websites) has proliferated in the last decade to the
extent where it is now an integral part of the web. In
this paper, we apply an in-depth and first-of-a-kind
performance \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hon:2021:ASI,
author = "Hsiao-Wuen Hon",
title = "{AI} for System --- Infusing {AI} into Cloud Computing
Systems",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "39--40",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453911",
abstract = "In the past fifteen years, the most significant
paradigm shift in the computing industry has been the
migration to cloud computing, which brings
unprecedented opportunities of digital transformation
to business, society, and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2021:SFI,
author = "Qingzhao Zhang and David Ke Hong and Ze Zhang and Qi
Alfred Chen and Scott Mahlke and Z. Morley Mao",
title = "A Systematic Framework to Identify Violations of
Scenario-dependent Driving Rules in Autonomous Vehicle
Software",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "43--44",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460101",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460101",
abstract = "Safety compliance is paramount to the safe deployment
of autonomous vehicle (AV) technologies in real-world
transportation systems. As AVs will share road
infrastructures with human drivers and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2021:SSG,
author = "Lishan Yang and Bin Nie and Adwait Jog and Evgenia
Smirni",
title = "{SUGAR}: Speeding Up {GPGPU} Application Resilience
Estimation with Input Sizing",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "45--46",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453917",
abstract = "As Graphics Processing Units (GPUs) are becoming a de
facto solution for accelerating a wide range of
applications, their reliable operation is becoming
increasingly important. One of the major challenges in
the domain of \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tang:2021:MMR,
author = "Xulong Tang and Mahmut Taylan Kandemir and Mustafa
Karakoy",
title = "Mix and Match: Reorganizing Tasks for Enhancing Data
Locality",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "47--48",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460103",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460103",
abstract = "Application programs that exhibit strong locality of
reference lead to minimized cache misses and better
performance in different architectures. In this paper,
we target task-based programs, and propose a novel
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weng:2021:OLB,
author = "Wentao Weng and Xingyu Zhou and R. Srikant",
title = "Optimal Load Balancing with Locality Constraints",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "49--50",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456279",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456279",
abstract = "Applications in cloud platforms motivate the study of
efficient load balancing under job-server constraints
and server heterogeneity. In this paper, we study load
balancing on a bipartite graph where left nodes
correspond \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rutten:2021:LBU,
author = "Daan Rutten and Debankur Mukherjee",
title = "Load Balancing Under Strict Compatibility
Constraints",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "51--52",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456275",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456275",
abstract = "Consider a system with N identical single-server
queues and M(N) task types, where each server is able
to process only a small subset of possible task types.
Arriving tasks select {d$>$}=2 random compatible
servers, and join \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellemans:2021:MWT,
author = "Tim Hellemans and Benny {Van Houdt}",
title = "Mean Waiting Time in Large-Scale and Critically Loaded
Power of d Load Balancing Systems",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "53--54",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460097",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460097",
abstract = "Mean field models are a popular tool used to analyse
load balancing policies. In some exceptional cases the
waiting time distribution of the mean field limit has
an explicit form. In other cases it can be computed as
the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anton:2021:IPH,
author = "Elene Anton and Urtzi Ayesta and Matthieu Jonckheere
and Ina Maria Verloop",
title = "Improving the Performance of Heterogeneous Data
Centers through Redundancy",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "55--56",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456274",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456274",
abstract = "We analyze the performance of redundancy in a
multi-type job and multi-type server system where PS is
implemented. We characterize the stability condition,
which coincides with that of a system where each job
type only \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2021:PPA,
author = "Xin Wang and Richard T. B. Ma",
title = "On Private Peering Agreements between Content and
Access Providers: a Contractual Equilibrium Analysis",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "57--58",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456277",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456277",
abstract = "Driven by the rapid growth of content traffic and the
demand for service quality, Internet content providers
(CPs) have started to bypass transit providers and
connect with access providers directly via private
peering \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fleder:2021:KWY,
author = "Michael Fleder and Devavrat Shah",
title = "{I} Know What You Bought At {Chipotle} for \$9.81 by
Solving a Linear Inverse Problem",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "59--60",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456273",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456273",
abstract = "We consider the question of identifying which set of
products are purchased and at what prices in a given
transaction by observing only the total amount spent in
the transaction, and nothing more. The ability to
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
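
Note on the entry above (Fleder and Shah): the abstract poses the inverse
problem of recovering purchased items from a single transaction total. A
toy, hedged Python sketch of that inverse problem with hypothetical menu
prices (the prices and item names below are invented for illustration;
this is not the authors' data or method):

# Enumerate quantity vectors whose priced sum matches the observed total.
from itertools import product

def explain_total(prices, total_cents, max_qty=3):
    names = list(prices)
    solutions = []
    for qty in product(range(max_qty + 1), repeat=len(names)):
        if sum(q * prices[n] for q, n in zip(qty, names)) == total_cents:
            solutions.append(dict(zip(names, qty)))
    return solutions

menu = {"burrito": 749, "chips": 167, "guac": 232, "drink": 255}
print(explain_total(menu, 981))  # hypothetical basket(s) summing to $9.81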
@Article{Varma:2021:DPM,
author = "Sushil Mahavir Varma and Francisco Castro and Siva
Theja Maguluri",
title = "Dynamic Pricing and Matching for Two-Sided Markets
with Strategic Servers",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "61--62",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456272",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456272",
abstract = "Motivated by applications in online marketplaces such
as ridesharing, we study dynamic pricing and matching
in two-sided queues with strategic servers. We consider
a discrete-time process in which, heterogeneous
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tang:2021:RMG,
author = "Jing Tang and Xueyan Tang and Andrew Lim and Kai Han
and Chongshou Li and Junsong Yuan",
title = "Revisiting Modified Greedy Algorithm for Monotone
Submodular Maximization with a Knapsack Constraint",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "63--64",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453925",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453925",
abstract = "Monotone submodular maximization with a knapsack
constraint is NP-hard. Various approximation algorithms
have been devised to address this optimization problem.
In this paper, we revisit the widely known modified
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
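
Note on the entry above (Tang et al.): the abstract refers to the widely
known modified greedy algorithm for monotone submodular maximization under
a knapsack constraint. A hedged Python sketch of that classic scheme
(density greedy followed by comparison with the best feasible singleton),
on a toy coverage objective; not the authors' code:

def modified_greedy(ground, f, cost, budget):
    # f: monotone submodular set function over frozensets; cost: dict.
    selected, spent = set(), 0.0
    while True:
        best, best_density = None, 0.0
        for e in ground - selected:
            if spent + cost[e] > budget:
                continue
            gain = f(frozenset(selected | {e})) - f(frozenset(selected))
            if gain / cost[e] > best_density:
                best, best_density = e, gain / cost[e]
        if best is None:
            break
        selected.add(best)
        spent += cost[best]
    # Modified greedy returns the better of the greedy set and the best
    # single feasible element.
    singletons = [{e} for e in ground if cost[e] <= budget]
    return max([selected] + singletons, key=lambda s: f(frozenset(s)))

cover = {"a": {1, 2}, "b": {2, 3, 4}, "c": {5}}
f = lambda s: len(set().union(*(cover[e] for e in s))) if s else 0
print(modified_greedy(set(cover), f, {"a": 1.0, "b": 2.0, "c": 1.5}, 2.5))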
@Article{Han:2021:AAS,
author = "Kai Han and Shuang Cui and Tianshuai Zhu and Enpei
Zhang and Benwei Wu and Zhizhuo Yin and Tong Xu and
Shaojie Tang and He Huang",
title = "Approximation Algorithms for Submodular Data
Summarization with a Knapsack Constraint",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "65--66",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453922",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453922",
abstract = "Data summarization, a fundamental methodology aimed at
selecting a representative subset of data elements from
a large pool of ground data, has found numerous
applications in big data processing, such as social
network \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sun:2021:CAO,
author = "Bo Sun and Ali Zeynali and Tongxin Li and Mohammad
Hajiesmaili and Adam Wierman and Danny H. K. Tsang",
title = "Competitive Algorithms for the Online Multiple
Knapsack Problem with Application to Electric Vehicle
Charging",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "67--68",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456271",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456271",
abstract = "We introduce and study a general version of the
fractional online knapsack problem with multiple
knapsacks, heterogeneous constraints on which items can
be assigned to which knapsack, and rate-limiting
constraints \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tassiulas:2021:EIS,
author = "Leandros Tassiulas",
title = "Enabling Intelligent Services at the Network Edge",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "69--70",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453912",
abstract = "The proliferation of novel mobile applications and the
associated AI services necessitates a fresh view on the
architecture, algorithms and services at the network
edge in order to meet stringent performance \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Foerster:2021:IDD,
author = "Klaus-Tycho Foerster and Janne H. Korhonen and Ami Paz
and Joel Rybicki and Stefan Schmid",
title = "Input-Dynamic Distributed Algorithms for Communication
Networks",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "71--72",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3453923",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3453923",
abstract = "Consider a distributed task where the communication
network is fixed but the local inputs given to the
nodes of the distributed system may change over time.
In this work, we explore the following question: if
some of the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perivier:2021:RTA,
author = "No{\'e}mie P{\'e}rivier and Chamsi Hssaine and Samitha
Samaranayake and Siddhartha Banerjee",
title = "Real-time Approximate Routing for Smart Transit
Systems",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "73--74",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460096",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460096",
abstract = "The advent of ride-hailing platforms such as Lyft and
Uber has revolutionized urban mobility in the past
decade. Given their increasingly important role in
today's society, recent years have seen growing
interest in integrating \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Asgari:2021:BSO,
author = "Kamiar Asgari and Michael J. Neely",
title = "{Bregman}-style Online Convex Optimization with Energy
Harvesting Constraints",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "75--76",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456270",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456270",
abstract = "This paper considers online convex optimization (OCO)
problems where decisions are constrained by available
energy resources. A key scenario is optimal power
control for an energy harvesting device with a finite
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
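
Note on the entry above (Asgari and Neely): the ``Bregman-style'' methods
in the title are built on the Bregman divergence, which for a
differentiable convex generator $\psi$ is
$$ D_\psi(x, y) \;=\; \psi(x) - \psi(y)
   - \langle \nabla \psi(y),\, x - y \rangle , $$
recovering the squared Euclidean distance when
$\psi(x) = \tfrac{1}{2}\|x\|_2^2$. This is a standard definition added as
an editorial gloss, not text from the cited abstract.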
@Article{Yu:2021:PDH,
author = "Liren Yu and Jiaming Xu and Xiaojun Lin",
title = "The Power of {D}-hops in Matching Power-Law Graphs",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "77--78",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460098",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460098",
abstract = "This paper studies seeded graph matching for power-law
graphs. Assume that two edge-correlated graphs are
independently edge-sampled from a common parent graph
with a power-law degree distribution. A set of
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2021:CHE,
author = "Yiguang Zhang and Jessy Xinyi Han and Ilica Mahajan
and Priyanjana Bengani and Augustin Chaintreau",
title = "Chasm in Hegemony: Explaining and Reproducing
Disparities in Homophilous Networks",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "79--80",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3460109",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3460109",
abstract = "In networks with a minority and a majority community,
it is well-studied that minorities are
under-represented at the top of the social hierarchy.
However, researchers are less clear about the
representation of minorities from \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hazimeh:2021:MGT,
author = "Ahmad Hazimeh and Adrian Herrera and Mathias Payer",
title = "{Magma}: a Ground-Truth Fuzzing Benchmark",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "81--82",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456276",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456276",
abstract = "High scalability and low running costs have made fuzz
testing the de facto standard for discovering software
bugs. Fuzzing techniques are constantly being improved
in a race to build the ultimate bug-finding tool.
However, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abanto-Leon:2021:SCL,
author = "Luis F. Abanto-Leon and Andreas B{\"a}uml and Gek Hong
(Allyson) Sim and Matthias Hollick and Arash Asadi",
title = "Stay Connected, Leave no Trace: Enhancing Security and
Privacy in {WiFi} via Obfuscating Radiometric
Fingerprints",
journal = j-SIGMETRICS,
volume = "49",
number = "1",
pages = "83--84",
month = jun,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543516.3456280",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:42:40 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543516.3456280",
abstract = "The intrinsic hardware imperfection of WiFi chipsets
manifests itself in the transmitted signal, leading to
a unique radiometric (radio frequency) fingerprint.
This fingerprint can be used as an additional means of
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2021:SIW,
author = "Mark S. Squillante",
title = "Special Issue on {The Workshop on MAthematical
performance Modeling and Analysis (MAMA 2021)}",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "2--2",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512800",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512800",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Su:2021:LAE,
author = "Yu Su and Jannie Yu and Vivek Anand and Adam Wierman",
title = "Learning-Augmented Energy-Aware Scheduling of
Precedence-Constrained Tasks",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "3--5",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512801",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512801",
abstract = "We study the scheduling problem of
precedence-constrained tasks to balance between
performance and energy consumption. To this point,
scheduling to balance performance and energy has been
limited to settings without dependencies between jobs.
In this \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liao:2021:PPO,
author = "Guocheng Liao and Yu Su and Juba Ziani and Adam
Wierman and Jianwei Huang",
title = "The Privacy Paradox and Optimal Bias-Variance
Trade-offs in Data Acquisition",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "6--8",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512802",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512802",
abstract = "While users claim to be concerned about privacy, often
they do little to protect their privacy in their online
actions. One prominent explanation for this ``privacy
paradox'' is that when an individual shares her data,
it is not just her privacy that is \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tavori:2021:CVO,
author = "Jhonatan Tavori and Hanoch Levy",
title = "Continual Versus Occasional Spreading In Networks:
Modeling Spreading Thresholds In Epidemic Processes",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "9--11",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512803",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512803",
abstract = "Epidemic processes are widely used as an abstraction
for various real-world phenomena --- human infections,
computer viruses, rumors, information broadcasts, etc.
[5, 1, 3]. Under the SIR model
(susceptible-infected-removed/recovered) in finite
networks, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ancel:2021:MFA,
author = "Julien Ancel and Christine Fricker and Hanene
Mohamed",
title = "Mean field analysis for bike and e-bike sharing
systems",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "12--14",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512804",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512804",
abstract = "Electric bikes are deployed massively in preexisting
bike sharing systems in order to attract new users and
replace cars on a larger scale (see [2]). But this
causes interactions between the two populations of
bikes. In this paper, we analyze a model of \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Miguelez:2021:RSG,
author = "Fernando Migu{\'e}lez and Josu Doncel and Urtzi
Ayesta",
title = "A Resource Sharing Game for the Freshness of Status
Updates",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "15--17",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512805",
abstract = "Timely information is a crucial factor in a wide range
of information, communication, and control systems. For
instance, in autonomous driving systems, the state of
the traffic and the location of the vehicles must be as
recent as possible. The Age of \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scully:2021:WDG,
author = "Ziv Scully and Lucas van Kreveld",
title = "When Does the {Gittins} Policy Have Asymptotically
Optimal Response Time Tail?",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "18--20",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512806",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512806",
abstract = "We consider scheduling in the M/G/1 queue with unknown
job sizes. It is known that the Gittins policy
minimizes mean response time in this setting. However,
the behavior of the tail of response time under Gittins
is poorly understood, even in the large- \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2021:PAQ,
author = "Yingdong Lu",
title = "Performance Analysis of A Queueing System with Server
Arrival and Departure",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "21--23",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512807",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512807",
abstract = "In many systems, in order to fulfill demand (computing
or other services) that varies over time, service
capacities often change accordingly. In this paper, we
analyze a simple two dimensional Markov chain model of
a queueing system in which multiple \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rutten:2021:CSA,
author = "Daan Rutten and Debankur Mukherjee",
title = "Capacity Scaling Augmented With Unreliable Machine
Learning Predictions",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "24--26",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512808",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512808",
abstract = "Modern data centers suffer from immense power
consumption. As a result, data center operators have
heavily invested in capacity scaling solutions, which
dynamically deactivate servers if the demand is low and
activate them again when the workload \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferragut:2021:EVC,
author = "Andres Ferragut and Lucas Narbondo and Fernando
Paganini",
title = "{EDF} vehicle charging under deadline uncertainty",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "27--29",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512809",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512809",
abstract = "In this paper, we analyze the performance of the EDF
scheduling policy for charging electrical vehicles when
the exact deadlines are not known by the scheduler.
Instead, they are declared by users. We quantify the
effect of this uncertainty in a mean \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bayat:2021:REA,
author = "Niloofar Bayat and Cody Morrin and Yuheng Wang and
Vishal Misra",
title = "Rank estimation for (approximately) low-rank
matrices",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "30--32",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512810",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512810",
abstract = "In observational data analysis, e.g., causal
inference, one often encounters data sets that are
noisy and incomplete, but come from inherently ``low
rank'' (or correlated) systems. Examples include user
ratings of movies/products and term frequency
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramtin:2021:CDA,
author = "Amir Reza Ramtin and Don Towsley and Philippe Nain and
Edmundo {de Souza e Silva} and Daniel S. Menasche",
title = "Are Covert {DDoS} Attacks Facing Multi-Feature
Detectors Feasible?",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "33--35",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512811",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512811",
abstract = "We state and prove the square root scaling laws for
the amount of traffic injected by a covert attacker
into a network from a set of homes under the assumption
that traffic descriptors follow a multivariate Gaussian
distribution. We numerically evaluate \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scully:2021:BMS,
author = "Ziv Scully",
title = "Bounding Mean Slowdown in Multiserver Systems",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "36--38",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512812",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512812",
abstract = "Recent progress in queueing theory has made it
possible to analyze the mean response time of
multiserver queueing systems under advanced scheduling
policies. However, this progress has so far been
limited to the metric of mean response time. In
practice, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghosh:2021:UGE,
author = "Soumyadip Ghosh and Mark S. Squillante",
title = "Unbiased Gradient Estimation for Robust Optimization",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "39--41",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512813",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512813",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2021:SIW,
author = "Nicolas Gast and Neil Walton",
title = "Special Issue on the Workshop about Reinforcement
Learning in Networks and Queues {(RLNQ 2021)}",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "42--42",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512814",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512814",
abstract = "The workshop aims to revisit the development and the
application of reinforcement learning techniques in the
various application areas covered by the SIGMETRICS
conference. Topics include but are not limited to
queueing networks (scheduling, resource \ldots{})",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tessler:2021:RLD,
author = "Chen Tessler and Yuval Shpigelman and Gal Dalal and
Amit Mandelbaum and Doron Haritan Kazakov and Benjamin
Fuhrer and Gal Chechik and Shie Mannor",
title = "Reinforcement Learning for {Datacenter Congestion
Control}",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "43--46",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512815",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512815",
abstract = "We approach the task of network congestion control in
datacenters using Reinforcement Learning (RL).
Successful congestion control algorithms can
dramatically improve latency and overall network
throughput. Until today, no such learning-based
algorithms \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Robledo:2021:QQL,
author = "Francisco Robledo and Vivek Borkar and Urtzi Ayesta
and Konstantin Avrachenkov",
title = "{QWI}: {Q}-learning with {Whittle Index}",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "47--50",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512816",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512816",
abstract = "The Whittle index policy is a heuristic that has shown
remarkably good performance (with guaranteed asymptotic
optimality) when applied to the class of problems known
as multi-armed restless bandits. In this paper we
develop QWI, an algorithm based on Q- \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Newton:2021:AOD,
author = "Conor Newton and Ayalvadi Ganesh and Henry Reeve",
title = "Asymptotic Optimality for Decentralised Bandits",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "51--53",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512817",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512817",
abstract = "We consider a large number of agents collaborating on
a multi-armed bandit problem with a large number of
arms. We present an algorithm which improves upon the
Gossip-Insert-Eliminate method of Chawla et al. [3].
We provide a regret bound which shows \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2021:CBA,
author = "Xin Liu and Bin Li and Pengyi Shi and Lei Ying",
title = "A Constrained Bandit Approach for Online Dispatching",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "54--56",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512818",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512818",
abstract = "Online dispatching refers to the process (or an
algorithm) that dispatches incoming jobs to available
servers in realtime. The problem arises in many
different fields. Examples include routing customer
calls to representatives in a call center, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Archer:2021:OBS,
author = "Christopher Archer and Siddhartha Banerjee and Mayleen
Cortez and Carrie Rucker and Sean R. Sinclair and Max
Solberg and Qiaomin Xie and Christina Lee Yu",
title = "{ORSuite}: Benchmarking Suite for Sequential
Operations Models",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "57--61",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512819",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512819",
abstract = "Reinforcement learning (RL) has received widespread
attention across multiple communities, but the
experiments have focused primarily on large-scale game
playing and robotics tasks. In this paper we introduce
ORSuite, an open-source library containing \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fanti:2021:ASS,
author = "Giulia Fanti",
title = "{ACM SIGMETRICS 2021 Student Research Competition}",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "62--62",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512820",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512820",
abstract = "Every year, the Association for Computation Machinery
(ACM) spearheads a series of Student Research
Competitions (SRCs) at ACM-sponsored or co-sponsored
conferences. These SRCs, which are sponsored by
Microsoft Research, provide undergraduate and
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tuli:2021:SIP,
author = "Shreshth Tuli",
title = "{SplitPlace}: Intelligent Placement of Split Neural
Nets in Mobile Edge Environments",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "63--65",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512821",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512821",
abstract = "In recent years, deep learning models have become
ubiquitous in industry and academia alike. Modern deep
neural networks can solve one of the most complex
problems today, but come at the price of massive
compute and storage requirements. This makes \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hong:2021:SZQ,
author = "Yige Hong",
title = "Sharp Zero-Queueing Bounds for Multi-Server Jobs",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "66--68",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512822",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512822",
abstract = "Multi-server jobs, which are jobs that occupy multiple
servers simultaneously during service, are prevalent in
today's computing clusters. But little is known about
the delay performance of systems with multi-server
jobs. In this paper, we consider \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghasemi:2021:ASA,
author = "Mahshid Ghasemi",
title = "{Auto-SDA}: Automated Video-based Social Distancing
Analyzer",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "69--71",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512823",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512823",
abstract = "Social distancing can reduce infection rates in
respiratory pandemics such as COVID-19, especially in
dense urban areas. To assess pedestrians' compliance
with social distancing policies, we use the pilot site
of the PAWR COSMOS wireless edge-cloud \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peng:2021:ERT,
author = "Edwin Peng",
title = "Exact Response Time Analysis of Preemptive Priority
Scheduling with Switching Overhead",
journal = j-SIGMETRICS,
volume = "49",
number = "2",
pages = "72--74",
month = sep,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3512798.3512824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:15 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3512798.3512824",
abstract = "The study of preemptive scheduling is essential to
computer systems [15, 12, 3, 4]. Motivated by this,
decades of queueing theory research have been done on
the subject [19, 18, 16, 13, 21, 8, 17, 2, 11, 20, 10,
1]. However, almost all queuing theoretic \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2021:SAS,
author = "Qingsong Liu and Wenfei Wu and Longbo Huang and
Zhixuan Fang",
title = "Simultaneously Achieving Sublinear Regret and
Constraint Violations for Online Convex Optimization
with Time-varying Constraints",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "4--5",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529114",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529114",
abstract = "In this paper, we develop a novel virtual-queue-based
online algorithm for online convex optimization (OCO)
problems with long-term and time-varying constraints
and conduct a performance analysis with respect to the
dynamic regret and constraint \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2021:CBS,
author = "Russell Lee and Yutao Zhou and Lin Yang and Mohammad
Hajiesmaili and Ramesh Sitaraman",
title = "Competitive Bidding Strategies for Online Linear
Optimization with Inventory Management Constraints",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "6--7",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529115",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529115",
abstract = "This paper develops competitive bidding strategies for
an online linear optimization problem with inventory
management constraints in both cost minimization and
profit maximization settings. In the minimization
problem, a decision maker should satisfy \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Song:2021:OLH,
author = "Jianhan Song and Gustavo de Veciana and Sanjay
Shakkottai",
title = "Online Learning for Hierarchical Scheduling to Support
Network Slicing in Cellular Networks",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "8--9",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529116",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529116",
abstract = "We study a learning-based hierarchical scheduling
framework in support of network slicing for cellular
networks. This addresses settings where users and/or
service classes are grouped into slices, and resources
are allocated hierarchically. The \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferragut:2021:SEC,
author = "Andres Ferragut and Lucas Narbondo and Fernando
Paganini",
title = "Scheduling {EV} charging with uncertain departure
times",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "10--15",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529117",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529117",
abstract = "In an EV charging facility, with multiple vehicles
requesting charge simultaneously, scheduling becomes
crucial to provide adequate service under vehicle
sojourn time constraints. However, these departure
times may not be known accurately, and typical
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scherrer:2021:APP,
author = "Simon Scherrer and Markus Legner and Adrian Perrig and
Stefan Schmid",
title = "An Axiomatic Perspective on the Performance Effects of
End-Host Path Selection",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "16--17",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529118",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529118",
abstract = "In various contexts of networking research, end-host
path selection has recently regained momentum as a
design principle. While such path selection has the
potential to increase performance and security of
networks, there is a prominent concern that it
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2021:OCN,
author = "Bai Liu and Eytan Modiano",
title = "Optimal Control for Networks with Unobservable
Malicious Nodes",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "18--19",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529119",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529119",
abstract = "With the rapid development of information technology,
modern network systems are becoming increasingly
complex and are increasingly vulnerable to attacks such
as Distributed Denial-of-Service (DDoS) attacks.
However, existing network control algorithms \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramtin:2021:FSL,
author = "Amir Reza Ramtin and Philippe Nain and Daniel S.
Menasche and Don Towsley and Edmundo {de Souza e
Silva}",
title = "Fundamental Scaling Laws of Covert {DDoS} Attacks",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "20--21",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529120",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529120",
abstract = "The Internet has become an indispensable commodity in
the last several years. This achievement has been
paralleled by the growing sophistication of home
networks, which nowadays host a variety of devices such
as PCs, tablets, mobile phones and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chaturvedi:2021:ITA,
author = "Anya Chaturvedi and Chandra Chekuri and Andr{\'e}a W.
Richa and Matthias Rost and Stefan Schmid and Jamison
Weber",
title = "Improved Throughput for All-or-Nothing Multicommodity
Flows with Arbitrary Demands",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "22--27",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529121",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529121",
abstract = "Throughput is a main performance objective in
communication networks. This paper considers a
fundamental maximum throughput routing problem --- the
all-or-nothing multicommodity flow (ANF) problem --- in
arbitrary directed graphs and in the practically
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huang:2021:EFB,
author = "Xiandong Huang and Qinglin Wang and Shuyu Lu and
Ruochen Hao and Songzhu Mei and Jie Liu",
title = "Evaluating {FFT-based} Algorithms for Strided
Convolutions on {ARMv8} Architectures",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "28--29",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529122",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529122",
abstract = "Convolutional Neural Networks (CNNs) have been widely
adopted in all kinds of artificial intelligence
applications. Most of the computational overhead of
CNNs is spent on convolutions. An effective
approach to reducing the overhead is FFT-based
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vardoyan:2021:QPE,
author = "Gayane Vardoyan and Matthew Skrzypczyk and Stephanie
Wehner",
title = "On the Quantum Performance Evaluation of Two
Distributed Quantum Architectures",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "30--31",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529123",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529123",
abstract = "Distributed quantum applications impose requirements
on the quality of the quantum states that they consume.
When analyzing architecture implementations of quantum
hardware, characterizing this quality forms an
important factor in understanding their \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gilman:2021:CCM,
author = "Guin Gilman and Robert J. Walls",
title = "Characterizing Concurrency Mechanisms for {NVIDIA
GPUs} under Deep Learning Workloads (Extended
Abstract)",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "32--34",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529124",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529124",
abstract = "Hazelwood et al. observed that at Facebook data
centers, variations in user activity (e.g. due to
diurnal load) resulted in low utilization periods with
large pools of idle resources [4]. To make use of these
resources, they proposed using machine \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pacut:2021:ISD,
author = "Maciej Pacut and Wenkai Dai and Alexandre Labbe and
Klaus-Tycho Foerster and Stefan Schmid",
title = "Improved Scalability of Demand-Aware Datacenter
Topologies With Minimal Route Lengths and Congestion",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "35--36",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529125",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529125",
abstract = "The performance of more and more cloud-based
applications critically depends on the performance of
the interconnecting datacenter network. Emerging
reconfigurable datacenter networks have the potential
to provide an unprecedented throughput by \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cadas:2021:FCH,
author = "Arnaud Cadas and Josu Doncel and Jean-Michel Fourneau
and Ana Busic",
title = "Flexibility can Hurt Dynamic Matching System
Performance",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "37--42",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529126",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529126",
abstract = "We study the performance of stochastic matching models
with general compatibility graphs. Items of different
classes arrive to the system according to independent
Poisson processes. Upon arrival, an item is matched
with a compatible item according to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varma:2021:HTT,
author = "Sushil Mahavir Varma and Siva Theja Maguluri",
title = "A Heavy Traffic Theory of Two-Sided Queues",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "43--44",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529127",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529127",
abstract = "Motivated by emerging applications in online matching
platforms and marketplaces, we study a two-sided queue.
Customers and servers that arrive into a two-sided
queue depart as soon as they are matched.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vaze:2021:SSM,
author = "Rahul Vaze and Jayakrishnan Nair",
title = "Speed Scaling with Multiple Servers under a Sum--Power
Constraint",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "45--50",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529128",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529128",
abstract = "The problem of scheduling jobs and choosing their
respective speeds with multiple servers under a
sum-power constraint to minimize the flow time + energy
is considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2021:FLD,
author = "Giuliano Casale and Peter G. Harrison and Wai Hong
Ong",
title = "Facilitating Load-Dependent Queueing Analysis Through
Factorization (Extended Abstract)",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "51--52",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529129",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529129",
abstract = "We construct novel exact and approximate solutions for
mean-value analysis and probabilistic evaluation of
closed queueing network models with limited
load-dependent (LLD) nodes. In this setting,
load-dependent functions are assumed to become constant
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zubeldia:2021:LTC,
author = "Martin Zubeldia and Michel Mandjes",
title = "Learning traffic correlations in multi-class queueing
systems by sampling queue lengths, with routing
applications",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "53--54",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529130",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529130",
abstract = "We consider a system of parallel single-server queues.
Work of different classes arrives as correlated
Gaussian processes with known drifts but unknown
covariance matrix, and it is deterministically routed
to the different queues according to some \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spang:2021:UTB,
author = "Bruce Spang",
title = "Updating the Theory of Buffer Sizing",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "55--56",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529131",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529131",
abstract = "Internet routers have packet buffers which reduce
packet loss during times of congestion. Sizing the
router buffer correctly is important: if a router
buffer is too small, it can cause high packet loss and
link under-utilization. If a buffer is too \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singhal:2021:CFR,
author = "Shiksha Singhal and Veeraruna Kavitha",
title = "Coalition Formation Resource Sharing Games in
Networks",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "57--58",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529132",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529132",
abstract = "Cooperative game theory deals with systems where
players want to cooperate to improve their payoffs. But
players may choose coalitions in a non-cooperative
manner, leading to a coalition-formation game. We
consider such a game with several players \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sivaraman:2021:ENT,
author = "Vibhaalakshmi Sivaraman and Weizhao Tang and Shaileshh
Bojja Venkatakrishnan and Giulia Fanti and Mohammad
Alizadeh",
title = "The Effect of Network Topology on Credit Network
Throughput",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "59--60",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529133",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529133",
abstract = "The global economy relies on digital transactions
between entities who do not trust one another. Today,
such transactions are handled by intermediaries who
extract fees (e.g., credit card providers). A natural
question is how to build financial systems \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2021:ERC,
author = "Xusheng Chen and Shixiong Zhao and Ji Qi and Jianyu
Jiang and Haoze Song and Cheng Wang and Tsz On Li and
T-H. Hubert Chan and Fengwei Zhang and Xiapu Luo and
Sen Wang and Gong Zhang and Heming Cui",
title = "Efficient and {DoS-resistant} Consensus for
Permissioned Blockchains",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "61--62",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529134",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529134",
abstract = "Existing permissioned blockchain systems designate a
fixed and explicit group of committee nodes to run a
consensus protocol that confirms the same sequence of
blocks among all nodes. Unfortunately, when such a
system runs on a large scale on the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jain:2021:SCM,
author = "Shubham Anand Jain and Shreyas Goenka and Divyam Bapna
and Nikhil Karamchandani and Jayakrishnan Nair",
title = "Sequential community mode estimation",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "63--64",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529135",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529135",
abstract = "Several applications in online learning involve
sequential sampling/polling of an underlying
population. A classical learning task in this space is
online cardinality estimation, where the goal is to
estimate the size of a set by sequential sampling of
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berg:2021:CPA,
author = "Benjamin Berg and Justin Whitehouse and Benjamin
Moseley and Weina Wang and Mor Harchol-Balter",
title = "The Case for Phase-Aware Scheduling of Parallelizable
Jobs",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "65--66",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529136",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529136",
abstract = "Parallelizable workloads are ubiquitous and appear
across a diverse array of modern computer systems. Data
centers, supercomputers, machine learning clusters,
distributed computing frameworks, and databases all
process jobs designed to be parallelized \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fu:2021:EJS,
author = "Xinzhe Fu and Eytan Modiano",
title = "Elastic Job Scheduling with Unknown Utility
Functions",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "67--68",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529137",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529137",
abstract = "We consider a bipartite network consisting of job
schedulers and parallel servers. Jobs arrive at the
schedulers following stochastic processes with unknown
arrival rates, and get routed to the servers, which
execute the jobs with unknown service rates. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ruuskanen:2021:IMF,
author = "Johan Ruuskanen and Tommi Berner and Karl-Erik Arzen
and Anton Cervin",
title = "Improving the Mean-Field Fluid Model of Processor
Sharing Queueing Networks for Dynamic Performance
Models in Cloud Computing",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "69--70",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529138",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529138",
abstract = "Resource management in cloud computing is a difficult
problem, as one needs to balance between adequate
service to clients and cost minimization in a dynamic
environment of interconnected components. To make
correct decisions in such an environment, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anselmi:2021:OSP,
author = "Jonatha Anselmi and Bruno Gaujal and
Louis-S{\'e}bastien Rebuffi",
title = "Optimal Speed Profile of a {DVFS} Processor under Soft
Deadlines",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "71--72",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529139",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529139",
abstract = "Minimizing the energy consumption of embedded systems
with real-time execution constraints is becoming more
and more important. More functionalities and better
performance/cost tradeoffs are expected from such
systems because of the increased use of \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buchholz:2021:RCE,
author = "Peter Buchholz",
title = "On the Representation of Correlated Exponential
Distributions by Phase Type Distributions",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "73--78",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529140",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529140",
abstract = "In this paper we present results for bivariate
exponential distributions which are represented by
phase type distributions. The paper extends results
from previous publications [3, 11] on this topic by
introducing new representations that require a
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vardoyan:2021:CRB,
author = "Gayane Vardoyan and Saikat Guha and Philippe Nain and
Don Towsley",
title = "On the Capacity Region of Bipartite and Tripartite
Entanglement Switching: Erratum",
journal = j-SIGMETRICS,
volume = "49",
number = "3",
pages = "79--80",
month = dec,
year = "2021",
CODEN = "????",
DOI = "https://doi.org/10.1145/3529113.3529141",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Apr 18 11:23:16 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "See \cite{Vardoyan:2020:CRB}.",
URL = "https://dl.acm.org/doi/10.1145/3529113.3529141",
abstract = "In Section 4.3 (Analysis), the last few lines of the
proof of Claim 3 should be replaced with the
following",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2022:MCQ,
author = "Mor Harchol-Balter and Ziv Scully",
title = "The most common queueing theory questions asked by
computer systems practitioners",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "3--7",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543148",
abstract = "This document examines five performance questions
which are repeatedly asked by practitioners in
industry: (i) My system utilization is very low, so why
are job delays so high? (ii) What should I do to lower
job delays? \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
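As a hedged aside on question (i) in the abstract above (illustrative only, not
material drawn from the cited paper): the textbook M/G/1 Pollaczek--Khinchine
formula already shows why delays can be large even at low utilization, since
queueing time scales with the second moment of the job size S rather than with
the utilization \rho = \lambda E[S] alone.
% Illustrative textbook formula only; \lambda, S, and \rho are the standard
% M/G/1 arrival rate, job size, and utilization, not notation from this paper.
\[
  E[T_Q] = \frac{\lambda\,E[S^2]}{2\,(1-\rho)}, \qquad E[T] = E[S] + E[T_Q],
\]
so a heavy-tailed job-size distribution (large E[S^2]) can dominate E[T] even
when \rho is small.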
@Article{Tan:2022:VCA,
author = "Chee Wei Tan",
title = "The Value of Cooperation: From {AIMD} to Flipped
Classroom Teaching",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "8--13",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543149",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543149",
abstract = "The well-known Additive Increase-Multiplicative
Decrease (AIMD) abstraction for network congestion
control was first published by Dah-Ming Chiu and Raj
Jain in their seminal work [4] in 1989 and soon played
a prominent \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xia:2022:TPM,
author = "Cathy H. Xia and Nanshan Chen and Priya Natarajan",
title = "Teaching Performance Modeling via Software and
Instructional Technology",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "14--19",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543150",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543150",
abstract = "Performance modeling and analysis has become a common
practice to assist the development of modern
information networks and service systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boudec:2022:PEP,
author = "Jean-Yves {Le Boudec}",
title = "Performance Evaluation: a Preparation for Statistics
and Data Science?",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "20--23",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543151",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543151",
abstract = "Lectures that use probability or statistics often
appear complex to students, sometimes because the
underlying stochastic models are not made explicit.
Writing a stochastic simulation program is a common
exercise in a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Serazzi:2022:UCP,
author = "Giuseppe Serazzi",
title = "Updating the Content of Performance Analysis
Textbooks",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "24--27",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543152",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543152",
abstract = "Fifty years have passed since Performance Evaluation
(PE) was recognized as a discipline in its own right
even if closely linked to computer science. In this
period, computer systems, networks, applications and
services have \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Donatelli:2022:SIW,
author = "Susanna Donatelli and Giuliana Franceschinis",
title = "Special issue on the Workshop on {TOols for Stochastic
Modeling and Evaluation (TOSME 2021)}",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "28--28",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543154",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543154",
abstract = "This special issue collects the contributions of the
Workshop on TOols for Stochastic Modeling and
Evaluation, held virtually on November 12, 2021, in
conjunction with the 30th IFIP WG 7.3 Performance
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Almousa:2022:CME,
author = "Salah Al-Deen Almousa and G{\textasciiacute}abor
Horv{\textasciiacute}ath and Ill {\textasciiacute}es
Horv{\textasciiacute}ath and Andr{\textasciiacute}as
M{\textasciiacute}esz{\textasciiacute}aros and Mikl
{\textasciiacute}os Telek",
title = "The {CME} method: Efficient numerical inverse
{Laplace} transformation with {Concentrated Matrix
Exponential} distribution",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "29--34",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543155",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543155",
abstract = "Numerical inverse Laplace transformation (NILT) is an
important tool in the field of system modelling and
performance analysis. The recently introduced CME
method has many important advantages over the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Allmeier:2022:RTL,
author = "Sebastian Allmeier and Nicolas Gast",
title = "{rmf tool} --- A library to Compute (Refined) Mean
Field Approximation(s)",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "35--40",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543156",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543156",
abstract = "Mean field approximation is a powerful technique to
study the performance of large stochastic systems
represented as systems of interacting objects.
Applications include load balancing models, epidemic
spreading, cache \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Masetti:2022:TTS,
author = "Giulio Masetti and Leonardo Robol and Silvano
Chiaradonna and Felicita {Di Giandomenico}",
title = "{TAPAS}: a Tool for Stochastic Evaluation of Large
Interdependent Composed Models with Absorbing States",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "41--46",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543157",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543157",
abstract = "TAPAS is a new tool for efficient evaluation of
dependability and performability attributes of systems
composed of many interconnected components. The tool
solves homogeneous continuous time Markov chains
described by \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marzolla:2022:QNM,
author = "Moreno Marzolla",
title = "Queueing Networks and {Markov} Chains Analysis with
the {Octave} queueing package",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "47--52",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543158",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/gnu.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543158",
abstract = "Queueing networks and Markov chains are a widely used
modeling notation that has been successfully applied to
many kind of systems. In this paper we describe the
queueing package, a free software package for
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cortellessa:2022:SMR,
author = "Vittorio Cortellessa and Daniele {Di Pompeo} and
Vincenzo Stoico and Michele Tucci",
title = "Software Model Refactoring Driven by Performance
Antipattern Detection",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "53--58",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543159",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543159",
abstract = "The satisfaction of ever more stringent performance
requirements is one of the main reasons for software
evolution. However, determining the primary causes of
performance degradation is generally challenging,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Budde:2022:FFI,
author = "Carlos E. Budde",
title = "{FIG}: the {Finite Improbability Generator v1.3}",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "59--64",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543160",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543160",
abstract = "This work presents version 1.3 of the Finite
Improbability Generator (FIG): a statistical model
checker to estimate transient and steady-state
reachability properties in stochastic automata.
Specialised in rare event \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ballarini:2022:CES,
author = "Paolo Ballarini and Beno{\^\i}t Barbot",
title = "{Cosmos}: Evolution of a Statistical Model Checking
Platform",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "65--69",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543161",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543161",
abstract = "Cosmos is a statistical model checker for Hybrid
Automata Stochastic Logic (HASL).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sheldon:2022:TSM,
author = "Matthew Sheldon and Giuliano Casale",
title = "{TauSSA}: Simulating {Markovian} Queueing Networks
with {Tau} Leaping",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "70--75",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543162",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543162",
abstract = "In this paper, we present TauSSA, a discrete-event
simulation tool for stochastic queueing networks
integrated in the LINE solver. TauSSA combines
Gillespie's stochastic simulation algorithm with tau
leaping, a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Llado:2022:POP,
author = "Catalina M. Llado",
title = "{PIPE 2.7} overview: a {Petri} net tool for
performance modeling and evaluation",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "76--80",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543163",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543163",
abstract = "The Petri net modeling formalism allows for the
convenient graphical visualization of system models, as
well as the modeling and performance analysis of
complex stochastic systems. PIPE is an open source,
platform \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carnevali:2022:OTA,
author = "Laura Carnevali and Marco Paolieri and Enrico
Vicario",
title = "The {ORIS} tool: app, library, and toolkit for
quantitative evaluation of non-{Markovian} systems",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "81--86",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543164",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543164",
abstract = "ORIS is a tool for quantitative modeling and
evaluation of concurrent systems with non-Markovian
durations. It provides a Graphical User Interface (GUI)
for model specification as Stochastic Time Petri Nets
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Amparore:2022:SME,
author = "Elvio G. Amparore",
title = "Stochastic modelling and evaluation using {GreatSPN}",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "87--91",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543165",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543165",
abstract = "GreatSPN is a tool that supports model-based
(stochastic) analysis of Discrete Event Dynamic Systems
(DEDS) modeled as Generalized Stochastic Petri Nets or
one of its extensions like Stochastic Well-formed Nets,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vassio:2022:MOO,
author = "Luca Vassio and Danilo Giordano and Jinoh Kim and Jon
Crowcroft",
title = "Message from the organizers of {WAIN}",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "92--92",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543167",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543167",
abstract = "We are pleased to welcome you to the 3rd International
Workshop on AI in Networks and Distributed Systems
(WAIN). The workshop aims to present high-quality
research leveraging machine learning \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tao:2022:LAO,
author = "Shimin Tao and Weibin Meng and Yimeng Cheng and Yichen
Zhu and Ying Liu and Chunning Du and Tao Han and
Yongpeng Zhao and Xiangguang Wang and Hao Yang",
title = "{LogStamp}: Automatic Online Log Parsing Based on
Sequence Labelling",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "93--98",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543168",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543168",
abstract = "Logs are one of the most critical data for service
management. It contains rich runtime information for
both services and users. Since size of logs are often
enormous in size and have free handwritten
constructions, a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hao:2022:IAA,
author = "Wenwen Hao and Ben Niu and Yin Luo and Kangkang Liu
and Na Liu",
title = "Improving accuracy and adaptability of {SSD} failure
prediction in hyper-scale data centers",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "99--104",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543169",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543169",
abstract = "The rapid expansion of flash-based solid state drives
(SSDs) makes SSD failure an important factor impacting
the reliability of storage systems in data centers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ketabi:2022:CAF,
author = "Shiva Ketabi and Matthew Buckley and Parsa Pazhooheshy
and Faraz Farahvash and Yashar Ganjali",
title = "Correlation-Aware Flow Consolidation for Load
Balancing and Beyond",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "105--110",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543170",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543170",
abstract = "Existing load balancing solutions rely on direct or
indirect measurement of rates (or congestion) averaged
over short periods of time. Sudden fluctuations in flow
rates can lead to significant undershooting/
overshooting of \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pujol-Perich:2022:UPG,
author = "David Pujol-Perich and Jose Suarez-Varela and Albert
Cabellos-Aparicio and Pere Barlet-Ros",
title = "Unveiling the potential of Graph Neural Networks for
robust Intrusion Detection",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "111--117",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543171",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543171",
abstract = "The last few years have seen an increasing wave of
attacks with serious economic and privacy damages,
which evinces the need for accurate Network Intrusion
Detection Systems (NIDS). Recent works propose the use
of Machine \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bertoli:2022:IDS,
author = "Gustavo de Carvalho Bertoli and Louren{\c{c}}o Alves
Pereira J{\'u}nior and Osamu Saotome",
title = "Improving detection of scanning attacks on
heterogeneous networks with {Federated Learning}",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "118--123",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543172",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543172",
abstract = "Scanning attacks are the first step in the attempt to
compromise the security of systems. Machine learning
(ML) has been used for network intrusion detection
systems (NIDS) to protect systems by learning
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barros:2022:UMN,
author = "Matheus F. C. Barros and Carlos H. G. Ferreira and
Lourenco A. P. Junior and Marco Mellia and Jussara M.
Almeida and Bruno Pereira dos Santos",
title = "Understanding mobility in networks: a node embedding
approach",
journal = j-SIGMETRICS,
volume = "49",
number = "4",
pages = "124--130",
month = mar,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3543146.3543173",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Jun 7 06:19:06 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3543146.3543173",
abstract = "Motivated by the growing number of mobile devices
capable of connecting and exchanging messages, we
propose a methodology aiming to model and analyze node
mobility in networks. We note that many existing
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Roberts:2022:SBS,
author = "James Roberts and Dario Rossi",
title = "Size-based scheduling vs fairness for datacenter
flows: a queuing perspective",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "2--10",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561076",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561076",
abstract = "Contrary to the conclusions of a recent body of work
where approximate shortest remaining processing time
first (SRPT) flow scheduling is advocated for
datacenter networks, this paper aims to demonstrate
that imposing fairness remains a preferable \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2022:SIW,
author = "Mark S. Squillante",
title = "Special Issue on {The Workshop on MAthematical
performance Modeling and Analysis (MAMA 2022)}",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "11--11",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561078",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561078",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shioda:2022:ETR,
author = "Shigeo Shioda and Kenta Takehara",
title = "Ergodicity of Time Reversal Process of Stochastic
Consensus Formation and Its Application",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "12--14",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561079",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561079",
abstract = "The consensus reached in stochastic consensus
formation is a random variable whose distribution is
generally difficult to determine analytically. We show
that the time reversal process for the stochastic
consensus formation process is ergodic. This fact
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tavori:2022:QNB,
author = "Jhonatan Tavori and Hanoch Levy",
title = "Queueing-Network Based Applications Under Worst-case
Attacks",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "15--17",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561080",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561080",
abstract = "A variety of today's critical applications are based
on queueing networks whose performance, mainly delay,
depends on routing and resource allocation. These
include computer networks (the Internet),
load-balancers on cloud systems and vehicular traffic
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kalvit:2022:DLL,
author = "Anand Kalvit and Assaf Zeevi",
title = "Dynamic Learning in Large Matching Markets",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "18--20",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561081",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561081",
abstract = "We study a sequential matching problem faced by large
centralized platforms where ``jobs'' must be matched to
``workers'' subject to uncertainty about worker skill
proficiencies. Jobs arrive at discrete times (possibly
in batches of stochastic size and \ldots{}).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2022:DDO,
author = "Harry Jiang and Xiaoxi Zhang and Carlee Joe-Wong",
title = "{DOLL}: {Distributed OnLine Learning} Using
Preemptible Cloud Instances",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "21--23",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561082",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561082",
abstract = "Most large-scale ML implementations scale to large
amounts of data by utilizing multiple servers or
virtual machines (VMs) that iteratively compute model
updates on local data that are periodically
synchronized. Due to the complexity of managing the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yao:2022:STM,
author = "Yuan Yao and Marco Paolieri and Leana Golubchik",
title = "Sojourn Time Minimization of Successful Jobs",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "24--26",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561083",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561083",
abstract = "Due to a growing interest in deep learning
applications [5], compute-intensive and long-running
(hours to days) training jobs have become a significant
component of datacenter workloads. A large fraction of
these jobs is often exploratory, with the goal
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ye:2022:ORR,
author = "Heng-Qing Ye",
title = "Optimal Round-Robin Routing to Parallel Servers in
Heavy Traffic",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "27--29",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561084",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561084",
abstract = "We study a system with heterogeneous parallel servers.
Upon arrival, a job is routed to the queue of one of
the servers. We establish the diffusion limit for the
round-robin (RR) policy, and show that with properly
chosen parameters, it achieves the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anton:2022:SUR,
author = "E. Anton and R. Righter and I. M. Verloop",
title = "Scheduling under redundancy",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "30--32",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561085",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561085",
abstract = "In the present extended abstract we investigate the
impact that the scheduling policy has on the
performance of redundancy systems when the usual
exponentially distributed i.i.d. copies assumption is
relaxed. In particular, we investigate the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2022:OSC,
author = "Xiaoqi Tan and Siyuan Yu and Raouf Boutaba",
title = "Online Selection with Convex Costs",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "33--35",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561086",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561086",
abstract = "We study a novel online optimization problem, termed
online selection with convex costs (OSCC). In OSCC,
there is a sequence of items, each with a value that
remains unknown before its arrival. At each step when
there is a new arrival, we need to make \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Horvath:2022:ONI,
author = "Illes Horvath and Andras Meszaros and Miklos Telek",
title = "Optimized numerical inverse {Laplace} transformation",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "36--38",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561087",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561087",
abstract = "Among the numerical inverse Laplace transformation
(NILT) methods, those that belong to the Abate-Whitt
framework (AWF) are considered to be the most efficient
ones currently. It is a characteristic feature of the
AWF NILT procedures that they are \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ding:2022:COM,
author = "Rui Ding and Eugene Feinberg",
title = "{CVaR} Optimization for {MDPs}: Existence and
Computation of Optimal Policies",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "39--41",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561088",
abstract = "We study the problem of Conditional Value-at-Risk
(CVaR) optimization for a finite-state Markov Decision
Process (MDP) with total discounted costs and the
reduction of this problem to a stochastic game with
perfect information. The CVaR optimization \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kalantzis:2022:QAR,
author = "Vasileios Kalantzis and Mark S. Squillante and
Shashanka Ubaru and Lior Horesh",
title = "On Quantum Algorithms for Random Walks in the
Nonnegative Quarter Plane",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "42--44",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561089",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561089",
abstract = "It is well known that strong connections exist between
random walks (RWs) in the nonnegative quarter plane and
the mathematical performance modeling, analysis and
optimization of computer systems and communication
networks. Examples include adaptive \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kannan:2022:SIW,
author = "Pravein Govindan Kannan and Priyanka Naik and Praveen
Tammana and Mythili Vutukuru",
title = "Special Issue on {The Workshop on Performance of
host-based Network Applications (PerfNA 2022)}",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "45--45",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561091",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561091",
abstract = "With the advancement of highly network-powered
paradigms like 5G, Microservices, etc. which are
typically deployed as containers/VMs, there is a
growing imperative on the host nodes to perform
specialized network tasks like monitoring, filtering,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:2022:GGD,
author = "Ziyan Wu and Tianming Cui and Arvind Narayanan and
Yang Zhang and Kangjie Lu and Antonia Zhai and Zhi-Li
Zhang",
title = "{GranularNF}: Granular Decomposition of Stateful {NFV}
at 100 {Gbps} Line Speed and Beyond",
journal = j-SIGMETRICS,
volume = "50",
number = "2",
pages = "46--51",
month = sep,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3561074.3561092",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Sep 2 10:20:59 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3561074.3561092",
abstract = "In this paper, we consider the challenges that arise
from the need to scale virtualized network functions
(VNFs) at 100 Gbps line speed and beyond. Traditional
VNF designs are monolithic in state management and
scheduling: internally maintaining all \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scully:2022:NTS,
author = "Ziv Scully",
title = "A New Toolbox for Scheduling Theory",
journal = j-SIGMETRICS,
volume = "50",
number = "3",
pages = "3--6",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579342.3579344",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:10:02 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3579342.3579344",
abstract = "Queueing delays are ubiquitous in many domains,
including computer systems, service systems,
communication networks, supply chains, and
transportation. Queueing and scheduling theory provide
a rigorous basis for understanding how to reduce delays
with \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agarwal:2022:CIS,
author = "Anish Agarwal",
title = "Causal Inference for Social and Engineering Systems",
journal = j-SIGMETRICS,
volume = "50",
number = "3",
pages = "7--11",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579342.3579345",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:10:02 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3579342.3579345",
abstract = "What will happen to Y if we do A? A variety of
meaningful social and engineering questions can be
formulated this way: What will happen to a patient's
health if they are given a new therapy? What will
happen to a country's economy if policy-makers
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2022:ULF,
author = "Zaiwei Chen",
title = "A Unified {Lyapunov} Framework for Finite-Sample
Analysis of Reinforcement Learning Algorithms",
journal = j-SIGMETRICS,
volume = "50",
number = "3",
pages = "12--15",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579342.3579346",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:10:02 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3579342.3579346",
abstract = "Reinforcement learning (RL) is a paradigm where an
agent learns to accomplish tasks by interacting with
the environment, similar to how humans learn. RL is
therefore viewed as a promising approach to achieve
artificial intelligence, as evidenced by the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tuli:2022:ACS,
author = "Shreshth Tuli",
title = "{AI} and Co-Simulation Driven Resource Management in
Fog Computing Environments",
journal = j-SIGMETRICS,
volume = "50",
number = "3",
pages = "16--19",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579342.3579347",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:10:02 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3579342.3579347",
abstract = "Research Summary: In the past decade, the evolution of
our digital lives has accelerated across multiple
facets, including efficient computation, communication
and transportation, making our lives simpler and more
convenient. This evolution has been \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salem:2022:OLN,
author = "Tareq Si Salem",
title = "Online Learning for Network Resource Allocation",
journal = j-SIGMETRICS,
volume = "50",
number = "3",
pages = "20--23",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579342.3579348",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:10:02 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3579342.3579348",
abstract = "Motivation. Connectivity and ubiquity of computing
devices have enabled a wide spectrum of network applications
such as content delivery, interpersonal communication,
and intervehicular communication. New use cases (e.g.,
autonomous driving, augmented \ldots{})",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maghakian:2022:ORA,
author = "Jessica Maghakian",
title = "Online Resource Allocation with Noisy Predictions",
journal = j-SIGMETRICS,
volume = "50",
number = "3",
pages = "24--27",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579342.3579349",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:10:02 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3579342.3579349",
abstract = "Brief Biography: Jessica Maghakian is a final-year PhD
candidate in Operations Research at Stony Brook
University. She has collaborated with several industry
partners and interned at Microsoft Research NYC.
Jessica's research combines data-driven \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{UlGias:2022:MBR,
author = "Alim {Ul Gias}",
title = "Model-based Resource Management for Fine-grained
Services",
journal = j-SIGMETRICS,
volume = "50",
number = "3",
pages = "28--31",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579342.3579350",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:10:02 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3579342.3579350",
abstract = "Brief Biography: Alim Ul Gias is currently a Research
Associate at the Centre for Parallel Computing (CPC),
University of Westminster. He completed his PhD from
Imperial College London in 2022. Before starting his
PhD, Alim was a lecturer at Institute \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sun:2022:RAS,
author = "Xiao Sun",
title = "Resource Allocation and Scheduling in Modern Cloud
Computing",
journal = j-SIGMETRICS,
volume = "50",
number = "3",
pages = "32--35",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579342.3579351",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:10:02 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3579342.3579351",
abstract = "With various types of resources, infrastructures and
users participating in the current big data ecosystem
at an astonishing speed, resource allocation and
scheduling has been identified as one of the key areas
needing substantial research for the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shang:2022:EDI,
author = "Xiaojun Shang",
title = "Enabling Data-intensive Workflows in Heterogeneous
Edge-cloud Networks",
journal = j-SIGMETRICS,
volume = "50",
number = "3",
pages = "36--38",
month = dec,
year = "2022",
CODEN = "????",
DOI = "https://doi.org/10.1145/3579342.3579352",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:10:02 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3579342.3579352",
abstract = "Brief Biography: Xiaojun Shang is currently a Ph.D.
candidate in the Department of Electrical and Computer
Engineering at Stony Brook University under the
supervision of Prof. Yuanyuan Yang. He expects to
graduate by May, 2023. Before joining Stony \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marin:2023:PFN,
author = "Andrea Marin and Sabina Rossi and Diletta Olliaro",
title = "A product-form network for systems with job stealing
policies",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "2--4",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595246",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595246",
abstract = "In this paper, we introduce a new product-form
queueing network model where servers are always busy.
This is obtained by defining a job movement policy that
admits \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kumar:2023:ADC,
author = "Nitesh Kumar and Gaurav S. Kasbekar and D. Manjunath",
title = "Application of Data Collected by Endpoint Detection
and Response Systems for Implementation of a Network
Security System based on Zero Trust Principles and the
{EigenTrust} Algorithm",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "5--7",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595247",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595247",
abstract = "Traditionally, security systems for enterprises have
implicit access based on strong cryptography,
authentication and key sharing, wherein access control
is \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jinan:2023:AAP,
author = "Rooji Jinan and Gaurav Gautam and Parimal Parag and
Vaneet Aggarwal",
title = "Asymptotic Analysis of Probabilistic Scheduling for
Erasure-Coded Heterogeneous Systems",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "8--10",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595248",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595248",
abstract = "We consider (k, k) fork-join scheduling on a large
number (say, N) of parallel servers with two sets of
heterogeneous rates. An incoming task is split into k
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hwang:2023:AEM,
author = "Wonjun Hwang and Yoora Kim and Kyunghan Lee",
title = "Augmenting Epidemic Models with Graph Neural
Networks",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "11--13",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595249",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595249",
abstract = "Conventional epidemic models are limited in their
ability to capture the dynamics of real-world epidemics
in the sense that they either place restrictions on the
models \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Biswas:2023:EDT,
author = "Sudeshna Biswas and Himanshu and Sushmita Ghosh and
Payali Das and Kaushik Saha and Swades De",
title = "Efficient Data Transfer Mechanism for {DLMS\slash
COSEM} Enabled Smart Energy Metering Platform",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "14--16",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595250",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595250",
abstract = "We report our implementation of DLMS/COSEM (Device
Language Message Specification/Companion Specification
for Energy Metering) enabled \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Umrawal:2023:LCS,
author = "Abhishek K. Umrawal and Vaneet Aggarwal",
title = "Leveraging the Community Structure of a Social Network
for Maximizing the Spread of Influence",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "17--19",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595251",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595251",
abstract = "We consider the problem of Influence Maximization
(IM), the task of selecting k seed nodes in a social
network such that the expected number of nodes
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2023:CCB,
author = "Samarth Gupta and Jinhang Zuo and Carlee Joe-Wong and
Gauri Joshi and Osman Yagan",
title = "Correlated Combinatorial Bandits for Online Resource
Allocation",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "20--22",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595252",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595252",
abstract = "We study a sequential resource allocation problem
where, at each round, the decision-maker needs to
allocate its limited budget among different available
entities. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Talukder:2023:EFL,
author = "Zahidur Talukder and Mohammad A. Islam",
title = "Efficient Federated Learning with Self-Regulating
Clients",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "23--25",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595253",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595253",
abstract = "Motivation. Since its inception [6], Federated
Learning (FL) has been enjoying a strong interest from
the privacy-preserving AI research community. FL also
has \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jhunjhunwala:2023:HTQ,
author = "Prakirt Raj Jhunjhunwala and Siva Theja Maguluri",
title = "Heavy Traffic Queue Length Distribution without
Resource Pooling in an Input-Queued Switch",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "26--28",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595254",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595254",
abstract = "This paper studies the heavy-traffic joint
distribution of queue lengths of an input-queued switch
operating under the MaxWeight scheduling policy.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tuli:2023:LDS,
author = "Shreshth Tuli and Giuliano Casale and Nicholas R.
Jennings",
title = "Learning to Dynamically Select Cost Optimal Schedulers
in Cloud Computing Environments",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "29--31",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595255",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595255",
abstract = "The operational cost of a cloud computing platform is
one of the most significant Quality of Service (QoS)
criteria for schedulers, crucial to keep up with
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Saurav:2023:MAI,
author = "Kumar Saurav and Rahul Vaze",
title = "Minimizing Age of Information under Arbitrary Arrival
Model with Arbitrary Packet Size",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "32--34",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595256",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595256",
abstract = "In networked systems such as internet-of-things,
cyberphysical systems, etc., information timeliness is
a critical requirement. Timely updates \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Prakash:2023:ROE,
author = "R Sri Prakash and Nikhil Karamchandani and Sharayu
Moharir",
title = "On the Regret of Online Edge Service Hosting",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "35--37",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595257",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595257",
abstract = "We consider the problem of service hosting where a
service provider can dynamically rent edge resources
via short term contracts to ensure better quality of
service \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reiffers-Masson:2023:OMA,
author = "Alexandre Reiffers-Masson and Isabel Amigo",
title = "Online Multi-Agent Decentralized {Byzantine}-robust
Gradient Estimation",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "38--40",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595258",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595258",
abstract = "In this paper, we propose an iterative scheme for
distributed Byzantine-resilient estimation of a
gradient associated with a black-box model. Our
algorithm is \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sam:2023:OLHa,
author = "Tyler Sam and Yudong Chen and Christina Lee Yu",
title = "Overcoming the Long Horizon Barrier for
Sample-Efficient Reinforcement Learning with Latent
Low-Rank Structure",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "41--43",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595259",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595259",
abstract = "Reinforcement learning (RL) methods have been
increasingly popular in sequential decision-making
tasks due to their empirical success. However, large
state and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2023:PPD,
author = "Qi Li and Dong Chen",
title = "Peer to Peer Distributed Solar Energy Trading",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "44--46",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595260",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595260",
abstract = "Solar module prices have dramatically dropped in
recent years, which in turn has facilitated distributed
solar energy resources (DSERs) in smart grids. To
manage \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2023:PAM,
author = "Wenxin Li",
title = "Performance Analysis of Modified {SRPT} in
Multiple-Processor Multitask Scheduling",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "47--49",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595261",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595261",
abstract = "In this paper we study the multiple-processor
multitask scheduling problem in both deterministic and
stochastic models, where each job has several tasks
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hossen:2023:PEM,
author = "Md Rajib Hossen and Mohammad A. Islam",
title = "Practical Efficient Microservice Autoscaling",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "50--52",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595262",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595262",
abstract = "Motivation. In recent years, the adoption of
microservices in production systems has been steadily
growing. With their loosely-coupled and lightweight
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2023:R,
author = "Yinan Zhu and Chunhui Duan and Xuan Ding",
title = "{RoSense}",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "53--55",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595263",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595263",
abstract = "RFID sensing leveraging backscatter signal features
(e.g., phase shift) from tags has gained increasing
popularity in numerous applications, but also suffers
from \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mittal:2023:SSR,
author = "Daksh Mittal and Sandeep Juneja and Shubhada Agrawal",
title = "Shift, scale and restart smaller models to estimate
larger ones: Agent based simulators in epidemiology",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "56--58",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595264",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595264",
abstract = "Agent-based simulators (ABS) are a popular
epidemiological modelling tool to study the impact of
non-medical interventions in managing epidemics [1],
[2]. They \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choudhury:2023:THT,
author = "Tuhinangshu Choudhury and Weina Wang and Gauri Joshi",
title = "Tackling Heterogeneous Traffic in Multi-access Systems
via Erasure Coded Servers",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "59--61",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595265",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595265",
abstract = "In cloud systems, the number of servers is fixed, and
each server is usually dedicated to a job type.
However, the traffic of various jobs can vary across
time, and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Regmi:2023:TDL,
author = "Hem Regmi and Sanjib Sur",
title = "Towards Deep Learning Augmented Robust {D}-Band
Millimeter-Wave Picocell Deployment",
journal = j-SIGMETRICS,
volume = "50",
number = "4",
pages = "62--64",
month = mar,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3595244.3595266",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon May 1 08:11:28 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3595244.3595266",
abstract = "D-band millimeter-wave, a key wireless technology for
beyond 5G networks, promises extremely high data rate,
ultra-low latency, and enables new Internet \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maruf:2023:MMD,
author = "Hasan {Al Maruf} and Yuhong Zhong and Hongyi Wang and
Mosharaf Chowdhury and Asaf Cidon and Carl
Waldspurger",
title = "{Memtrade}: Marketplace for Disaggregated Memory
Clouds",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "1--2",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593553",
abstract = "We present Memtrade, the first practical marketplace
for disaggregated memory clouds. Clouds introduce a set
of unique challenges for resource \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Addanki:2023:MNO,
author = "Vamsi Addanki and Chen Avin and Stefan Schmid",
title = "{Mars}: Near-Optimal Throughput with Shallow Buffers
in Reconfigurable Datacenter Networks",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "3--4",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593551",
abstract = "The performance of large-scale computing systems often
critically depends on high-performance communication
networks. Dynamically reconfigurable \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Giannoula:2023:ASE,
author = "Christina Giannoula and Kailong Huang and Jonathan
Tang and Nectarios Koziris and Georgios Goumas and
Zeshan Chishti and Nandita Vijaykumar",
title = "Architectural Support for Efficient Data Movement in
Fully Disaggregated Systems",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "5--6",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593533",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593533",
abstract = "Traditional data centers include monolithic servers
that tightly integrate CPU, memory and disk (Figure
1a). Instead, Disaggregated Systems (DSs) [8, 13, 18,
27] \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zerwas:2023:DHT,
author = "Johannes Zerwas and Csaba Gy{\"o}rgyi and Andreas
Blenk and Stefan Schmid and Chen Avin",
title = "{Duo}: a High-Throughput Reconfigurable Datacenter
Network Using Local Routing and Control",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "7--8",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593537",
abstract = "The performance of many cloud-based applications
critically depends on the capacity of the underlying
datacenter network. A particularly innovative
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2023:TAD,
author = "Jiaxin Lin and Tao Ji and Xiangpeng Hao and Hokeun Cha
and Yanfang Le and Xiangyao Yu and Aditya Akella",
title = "Towards Accelerating Data Intensive {Application}'s
Shuffle Process Using {SmartNICs}",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "9--10",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593577",
abstract = "Emerging SmartNIC creates new opportunities to offload
application-level computation into the networking
layer. Shuffle, the all-to-all data \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2023:DBF,
author = "Wenzhe Zhu and Yongkun Li and Erci Xu and Fei Li and
Yinlong Xu and John C. S. Lui",
title = "{DiffForward}: On Balancing Forwarding Traffic for
Modern Cloud Block Services via Differentiated
Forwarding",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "11--12",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593536",
abstract = "Modern cloud block service provides cloud users with
virtual block disks (VDisks), and it usually relies on
a forwarding layer consisting of multiple proxy
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kumar:2023:SCD,
author = "Adithya Kumar and Anand Sivasubramaniam and Timothy
Zhu",
title = "{SplitRPC}: a Control + Data Path Splitting {RPC}
Stack for {ML} Inference Serving",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "13--14",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593571",
abstract = "The growing adoption of hardware accelerators driven
by their intelligent compiler and runtime system
counterparts has democratized ML services and
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gozlan:2023:GCB,
author = "Itamar Gozlan and Chen Avin and Gil Einziger and
Gabriel Scalosub",
title = "Go-to-Controller is Better: Efficient and Optimal
{LPM} Caching with Splicing",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "15--16",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593546",
abstract = "Data center networks must support huge forwarding
policies as they handle the traffic of the various
tenants. Since such policies cannot be stored within
the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DeSensi:2023:NCI,
author = "Daniele {De Sensi} and Tiziano {De Matteis} and
Konstantin Taranov and Salvatore {Di Girolamo} and
Tobias Rahn and Torsten Hoefler",
title = "Noise in the Clouds: Influence of Network Performance
Variability on Application Scalability",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "17--18",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593555",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593555",
abstract = "Cloud computing represents an appealing opportunity
for cost-effective deployment of HPC workloads on the
best-fitting hardware. However, although cloud and
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2023:SFF,
author = "Yi Liu and Shouqian Shi and Minghao Xie and Heiner
Litz and Chen Qian",
title = "Smash: Flexible, Fast, and Resource-efficient
Placement and Lookup of Distributed Storage",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "19--20",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593569",
abstract = "Smash is a new placement and lookup method for
distributed storage systems. It achieves full placement
flexibility and low DRAM cost to store ID-to-location
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jin:2023:SSL,
author = "Wangkai Jin and Xiangjun Peng",
title = "{SLITS}: Sparsity-Lightened Intelligent Thread
Scheduling",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "21--22",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593568",
abstract = "To make the most of hardware resources in multi-core
architectures, effective thread scheduling is crucial.
To achieve this, various scheduling objectives have
been \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2023:AAP,
author = "Hongyuan Liu and Sreepathi Pai and Adwait Jog",
title = "Asynchronous Automata Processing on {GPUs}",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "23--24",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593524",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593524",
abstract = "Finite-state automata serve as compute kernels for
application domains such as pattern matching and data
analytics. Existing approaches on GPUs \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2023:GGA,
author = "Canhui Chen and Zhixuan Fang",
title = "Gacha Game Analysis and Design",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "25--26",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593544",
abstract = "Gacha game is a special opaque selling approach, where
the seller is selling gacha pulls to the buyer. Each
gacha pull provides a certain probability for the buyer
to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rutten:2023:MFA,
author = "Daan Rutten and Debankur Mukherjee",
title = "Mean-field Analysis for Load Balancing on Spatial
Graphs",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "27--28",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593552",
abstract = "A pivotal methodological tool behind the analysis of
large-scale load balancing systems is mean-field
analysis. The high-level idea is to represent the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Allmeier:2023:BRM,
author = "Sebastian Allmeier and Nicolas Gast",
title = "Bias and Refinement of Multiscale Mean Field Models",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "29--30",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593527",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593527",
abstract = "We analyze the error of an ODE approximation of a
generic two-timescale model (X, Y), where the slow
component X describes a population of \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salem:2023:ELT,
author = "Tareq Si Salem and Georgios Iosifidis and Giovanni
Neglia",
title = "Enabling Long-term Fairness in Dynamic Resource
Allocation",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "31--32",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593541",
abstract = "We study the fairness of dynamic resource allocation
problem under the $ \alpha $-fairness criterion. We
recognize two different fairness objectives that
naturally arise in \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ayvasik:2023:PPE,
author = "Serkut Ayvasik and Fidan Mehmeti and Edwin Babaians
and Wolfgang Kellerer",
title = "{PEACH}: Proactive and Environment Aware Channel State
Information Prediction with Depth Images",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "33--34",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593563",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593563",
abstract = "Up-to-date and accurate prediction of Channel State
Information (CSI) is of paramount importance in
Ultra-Reliable Low-Latency Communications \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2023:FPS,
author = "Yunzhuo Liu and Bo Jiang and Tian Guo and Zimeng Huang
and Wenhao Ma and Xinbing Wang and Chenghu Zhou",
title = "{FuncPipe}: a Pipelined Serverless Framework for Fast
and Cost-efficient Training of Deep Learning Models",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "35--36",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593543",
abstract = "Training deep learning (DL) models in the cloud has
become a norm. With the emergence of serverless
computing and its benefits of true pay-as-you-go
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{K:2023:CPA,
author = "Prashanthi S. K and Sai Anuroop Kesanapalli and Yogesh
Simmhan",
title = "Characterizing the Performance of Accelerated {Jetson}
Edge Devices for Training Deep Learning Models",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "37--38",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593530",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593530",
abstract = "Deep Neural Network (DNN) models are becoming
ubiquitous in a variety of contemporary domains such as
Autonomous Vehicles, Smart cities and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abyaneh:2023:MMA,
author = "Ali Hossein Abbasi Abyaneh and Maizi Liao and Seyed
Majid Zahedi",
title = "{Malcolm}: Multi-agent Learning for Cooperative Load
Management at Rack Scale",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "39--40",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593550",
abstract = "We consider the problem of balancing the load among
servers in dense racks for microsecond-scale workloads.
To balance the load in such settings, tens of
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{MacMillan:2023:CAO,
author = "Kyle MacMillan and Tarun Mangla and James Saxon and
Nicole P. Marwell and Nick Feamster",
title = "A Comparative Analysis of {Ookla Speedtest and
Measurement Labs Network Diagnostic Test (NDT7)}",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "41--42",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593522",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593522",
abstract = "Consumers, regulators, and ISPs all use client-based
``speed tests'' to measure network performance, both in
single-user settings and in aggregate. Two \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kumar:2023:EOP,
author = "Rashna Kumar and Sana Asif and Elise Lee and
Fabi{\'a}n E. Bustamante",
title = "Each at its Own Pace: Third-Party Dependency and
Centralization Around the World",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "43--44",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593539",
abstract = "We describe the results of a large-scale study of
third-party dependencies around the world based on
regional top-500 popular websites accessed from
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2023:DMA,
author = "Haoran Lu and Qingchuan Zhao and Yongliang Chen and
Xiaojing Liao and Zhiqiang Lin",
title = "Detecting and Measuring Aggressive Location Harvesting
in Mobile Apps via Data-flow Path Embedding",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "45--46",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593535",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593535",
abstract = "Today, location-based services have become prevalent
in the mobile platform, where mobile apps provide
specific services to a user based on his or her
location. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hsu:2023:FLI,
author = "Amanda Hsu and Frank Li and Paul Pearce",
title = "{Fiat Lux}: Illuminating {IPv6} Apportionment with
Different Datasets",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "47--48",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593542",
abstract = "IPv6 adoption continues to grow, making up more than
40\% of client traffic to Google globally. While the
ubiquity of the IPv4 address space makes it comparably
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiesa:2023:NMM,
author = "Marco Chiesa and F{\'a}bio L. Verdi",
title = "Network Monitoring on Multi-Pipe Switches",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "49--50",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593554",
abstract = "Programmable switches have been widely used to design
network monitoring solutions that operate in the fast
data-plane level, e.g., detecting heavy hitters,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2023:RTS,
author = "Haibo Wang and Dimitrios Melissourgos and Chaoyi Ma
and Shigang Chen",
title = "Real-time Spread Burst Detection in Data Streaming",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "51--52",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593566",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593566",
abstract = "Data streaming has many applications in network
monitoring, web services, e-commerce, stock trading,
social networks, and distributed sensing. This paper
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Naseer:2023:JCF,
author = "Usama Naseer and Theophilus A. Benson",
title = "{JS} Capsules: a Framework for Capturing Fine-grained
{JavaScript} Memory Measurements for the Mobile
{Web}.",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "53--54",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/java2020.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593548",
abstract = "Understanding the resource consumption of the mobile
web is an important topic that has garnered much
attention in recent years. However, existing works
mostly \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Banerjee:2023:OFA,
author = "Siddhartha Banerjee and Chamsi Hssaine and Sean R.
Sinclair",
title = "Online Fair Allocation of Perishable Resources",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "55--56",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593558",
abstract = "We consider a practically motivated variant of the
canonical online fair allocation problem: a
decision-maker has a budget of resources to allocate
over a fixed \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2023:DPP,
author = "Mozhengfu Liu and Xueyan Tang",
title = "Dynamic Bin Packing with Predictions",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "57--58",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593538",
abstract = "The MinUsageTime Dynamic Bin Packing (DBP) problem
aims to minimize the accumulated bin usage time for
packing a sequence of items into bins. It is \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sun:2023:OKP,
author = "Bo Sun and Lin Yang and Mohammad Hajiesmaili and Adam
Wierman and John C. S. Lui and Don Towsley and Danny H.
K. Tsang",
title = "The Online Knapsack Problem with Departures",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "59--60",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593576",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593576",
abstract = "The online knapsack problem is a classic online
resource allocation problem in networking and
operations research. Its basic version studies how to
pack online \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2023:PKB,
author = "Fengjiao Li and Xingyu Zhou and Bo Ji",
title = "{(Private)} Kernelized Bandits with Distributed Biased
Feedback",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "61--62",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593565",
abstract = "We study kernelized bandits with distributed biased
feedback. This problem is motivated by several
real-world applications (such as dynamic pricing,
cellular network \ldots{})",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balseiro:2023:ORA,
author = "Santiago Balseiro and Christian Kroer and Rachitesh
Kumar",
title = "Online Resource Allocation under Horizon Uncertainty",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "63--64",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593559",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593559",
abstract = "We study stochastic online resource allocation: a
decision maker needs to allocate limited resources to
stochastically-generated sequentially-arriving requests
in \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cui:2023:SAC,
author = "Shuang Cui and Kai Han and Jing Tang and He Huang and
Xueying Li and Zhiyu Li",
title = "Streaming Algorithms for Constrained Submodular
Maximization",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "65--66",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593573",
abstract = "Due to the pervasive ``diminishing returns'' property
that appears in data-intensive applications, submodular
maximization problems have aroused great \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vial:2023:RMA,
author = "Daniel Vial and Sanjay Shakkottai and R. Srikant",
title = "Robust Multi-Agent Bandits Over Undirected Graphs",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "67--68",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593567",
abstract = "We consider a multi-agent multi-armed bandit setting
in which n honest agents collaborate over a network to
minimize regret but m malicious agents can \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mhaisen:2023:ONR,
author = "Naram Mhaisen and Abhishek Sinha and Georgios Paschos
and George Iosifidis",
title = "Optimistic No-regret Algorithms for Discrete Caching",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "69--70",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593561",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593561",
abstract = "We take a systematic look at the problem of storing
whole files in a cache with limited capacity in the
context of optimistic learning, where the caching
policy \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rutten:2023:SOO,
author = "Daan Rutten and Nicolas Christianson and Debankur
Mukherjee and Adam Wierman",
title = "Smoothed Online Optimization with Unreliable
Predictions",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "71--72",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593570",
abstract = "We consider online optimization with switching costs
in a normed vector space $ (X, \| \cdot \|) $ wherein,
at each time t, a decision maker observes a non-convex
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2023:OAS,
author = "Jing Yu and Dimitar Ho and Adam Wierman",
title = "Online Adversarial Stabilization of Unknown Networked
Systems",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "73--74",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593557",
abstract = "We investigate the problem of stabilizing an unknown
networked linear system under communication constraints
and adversarial disturbances. We propose the first
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williams:2023:MMK,
author = "Jalani K. Williams and Mor Harchol-Balter and Weina
Wang",
title = "The {M/M/$k$} with Deterministic Setup Times",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "75--76",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593575",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593575",
abstract = "Capacity management, whether it involves servers in a
data center, or human staff in a call center, or
doctors in a hospital, is largely about balancing a
resource-delay \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fu:2023:JLC,
author = "Xinzhe Fu and Eytan Modiano",
title = "Joint Learning and Control in Stochastic Queueing
Networks with Unknown Utilities",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "77--78",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593547",
abstract = "We study the optimal control problem in stochastic
queueing networks with a set of job dispatchers
connected to a set of parallel servers with queues.
Jobs arrive at \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wei:2023:CRP,
author = "Yehua Wei and Jiaming Xu and Sophie H. Yu",
title = "Constant Regret Primal-Dual Policy for Multi-way
Dynamic Matching",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "79--80",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593532",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593532",
abstract = "We study a discrete-time dynamic multi-way matching
model. There are finitely many agent types that arrive
stochastically and wait to be matched. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huo:2023:BEM,
author = "Dongyan (Lucy) Huo and Yudong Chen and Qiaomin Xie",
title = "Bias and Extrapolation in {Markovian} Linear
Stochastic Approximation with Constant Stepsizes",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "81--82",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593526",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593526",
abstract = "We consider Linear Stochastic Approximation (LSA) with
constant stepsize and Markovian data. Viewing the joint
process of the data and LSA iterate as a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2023:GCL,
author = "Yizhou Zhang and Guannan Qu and Pan Xu and Yiheng Lin
and Zaiwei Chen and Adam Wierman",
title = "Global Convergence of Localized Policy Iteration in
Networked Multi-Agent Reinforcement Learning",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "83--84",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593545",
abstract = "We study a multi-agent reinforcement learning (MARL)
problem where the agents interact over a given network.
The goal of the agents is to cooperatively \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sam:2023:OLHb,
author = "Tyler Sam and Yudong Chen and Christina Lee Yu",
title = "Overcoming the Long Horizon Barrier for
Sample-Efficient Reinforcement Learning with Latent
Low-Rank Structure",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "85--86",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593562",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593562",
abstract = "Reinforcement learning (RL) methods have been
increasingly popular in sequential decision making
tasks due to their empirical success. However, large
state and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2023:DMS,
author = "Mingming Zhang and Xiang Li and Baojun Liu and JianYu
Lu and Yiming Zhang and Jianjun Chen and Haixin Duan
and Shuang Hao and Xiaofeng Zheng",
title = "Detecting and Measuring Security Risks of
Hosting-Based Dangling Domains",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "87--88",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593534",
abstract = "Public hosting services offer a convenient and secure
option for creating web applications. However,
adversaries can take over a domain by exploiting
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goldberg:2023:BTU,
author = "Alexander Goldberg and Giulia Fanti and Nihar B.
Shah",
title = "Batching of Tasks by Users of Pseudonymous Forums:
Anonymity Compromise and Protection",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "89--90",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593525",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593525",
abstract = "In a number of applications where anonymity is
critical, users act under pseudonyms to preserve their
privacy. For instance, in scientific peer review
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2023:CCT,
author = "Kailong Wang and Yuxi Ling and Yanjun Zhang and Zhou
Yu and Haoyu Wang and Guangdong Bai and Beng Chin Ooi
and Jin Song Dong",
title = "Characterizing Cryptocurrency-themed Malicious Browser
Extensions",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "91--92",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593529",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/cryptography2020.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593529",
abstract = "Due to the surging popularity of various
cryptocurrencies in recent years, a large number of
browser extensions have been developed as portals to
access \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tang:2023:SLR,
author = "Weizhao Tang and Lucianna Kiffer and Giulia Fanti and
Ari Juels",
title = "Strategic Latency Reduction in Blockchain Peer-to-Peer
Networks",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "93--94",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593572",
abstract = "Most permissionless blockchain networks run on
peer-to-peer (P2P) networks, which offer flexibility
and decentralization at the expense of performance
(e.g., \ldots{})",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varma:2023:PDC,
author = "Sushil Mahavir Varma and Francisco Castro and Siva
Theja Maguluri",
title = "Power-of-d Choices Load Balancing in the Sub-{Halfin
Whitt} Regime",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "95--96",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593564",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593564",
abstract = "We characterize the steady-state queue length
distribution for the Power-of-d choices routing
algorithm for almost all values of d in the sub-Halfin
Whitt \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{VanHoudt:2023:SAI,
author = "Benny {Van Houdt}",
title = "On the Stochastic and Asymptotic Improvement of
First-Come First-Served and Nudge Scheduling",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "97--98",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593556",
abstract = "Recently it was shown that, contrary to expectations,
the First-Come-First-Served (FCFS) scheduling algorithm
can be stochastically improved upon by a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2023:OSMa,
author = "Isaac Grosof and Ziv Scully and Mor Harchol-Balter and
Alan Scheller-Wolf",
title = "Optimal Scheduling in the Multiserver-job Model under
Heavy Traffic",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "99--100",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593560",
abstract = "Multiserver-job systems, where jobs require concurrent
service at many servers, occur widely in practice.
Essentially all of the theoretical work on \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kadota:2023:SRP,
author = "Igor Kadota and Dror Jacoby and Hagit Messer and Gil
Zussman and Jonatan Ostrometzky",
title = "Switching in the Rain: Predictive Wireless x-haul
Network Reconfiguration",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "101--102",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593574",
abstract = "4G, 5G, and smart city networks often rely on
microwave and millimeter-wave x-haul links. A major
challenge associated with these high frequency links is
their \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2023:FLW,
author = "Ruofeng Liu and Nakjung Choi",
title = "A First Look at {Wi-Fi 6} in Action: Throughput,
Latency, Energy Efficiency, and Security",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "103--104",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593523",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593523",
abstract = "We present the performance measurement of Wi-Fi 6
(IEEE 802.11ax). Our experiments focus on multi-client
scenarios. The results reveal the impact of the new
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2023:CCB,
author = "Ding Zhang and Panneer Selvam Santhalingam and Parth
Pathak and Zizhan Zheng",
title = "{CoBF}: Coordinated Beamforming in Dense {mmWave}
Networks",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "105--106",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593531",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593531",
abstract = "With MIMO and enhanced beamforming features, IEEE
802.11ay is poised to create the next generation of
mmWave WLANs that can provide over 100 Gbps data
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2023:LPM,
author = "Yilin Liu and Shijia Zhang and Mahanth Gowda and
Srihari Nelakuditi",
title = "Leveraging the Properties of {mmWave} Signals for
{$3$D} Finger Motion Tracking for Interactive {IoT}
Applications",
journal = j-SIGMETRICS,
volume = "51",
number = "1",
pages = "107--108",
month = jun,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3606376.3593549",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jul 3 08:05:17 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3606376.3593549",
abstract = "Wireless signals, which are mainly used for
communication networks, also have the potential to
extend our senses, enabling us to see behind closed
doors and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2023:SIW,
author = "Mark S. Squillante",
title = "Special Issue on {The Workshop on MAthematical
performance Modeling and Analysis (MAMA 2023)}",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "2",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626572",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pan:2023:SCO,
author = "Weici Pan and Zhenhua Liu",
title = "Switching Constrained Online Convex Optimization with
Predictions and Feedback Delays",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "3--5",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626573",
abstract = "In various applications such as smart grids, the
online player is allowed a limited number of switches
among decisions. Additionally, real-world scenarios
often involve \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2023:NSR,
author = "Isaac Grosof and Mor Harchol-Balter and Alan
Scheller-Wolf",
title = "New Stability Results for Multiserver-job Models via
Product-form Saturated Systems",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "6--8",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626574",
abstract = "Multiserver-job (MSJ) models are increasingly common
in today's datacenters. In these models, each job runs
on multiple servers concurrently, for some \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anton:2023:SDR,
author = "E. Anton and K. Gardner",
title = "The stationary distribution of the redundancy-$d$
model with random order of service",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "9--11",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626575",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626575",
abstract = "Redundancy has gained considerable attention as a
dispatching paradigm that promises the potential for
significant response time improvements, see \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xie:2023:ILS,
author = "Runhan Xie and Kristen Gardner and Rhonda Righter",
title = "Insensitivity for Loss Systems with Compatibilities",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "12--14",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626576",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626576",
abstract = "In the study of queueing systems, we are often
interested in finding the stationary distribution of
the system state, which in turn can be used to compute
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bor:2023:FME,
author = "Julianna Bor and Giuliano Casale and William
Knottenbelt and Evgenia Smirni and Andreas
Stathopoulos",
title = "Fitting with matrix exponential mixtures generated by
discrete probabilistic scaling",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "15--17",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626577",
abstract = "Matrix exponential (ME) distributions generalize
phase-type distributions; however, their use in
queueing theory is hampered by the difficulty of
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhao:2023:ORM,
author = "Zhisheng Zhao and Debankur Mukherjee",
title = "Optimal Rate-Matrix Pruning For Heterogeneous
Systems",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "18--20",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626578",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626578",
abstract = "We consider large-scale load balancing systems where
the processing time distribution of tasks depends on both
task and server types. We analyze the system in
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rutten:2023:DRS,
author = "Daan Rutten and Martin Zubeldia and Debankur
Mukherjee",
title = "Distributed Rate Scaling in Large-Scale Service
Systems",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "21--23",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626579",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626579",
abstract = "We consider a large-scale parallel-server system,
where each server dynamically chooses its processing
speed in a completely distributed fashion. The goal is
to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jhunjhunwala:2023:ETB,
author = "Prakirt Jhunjhunwala and Daniela Hurtado-Lange and
Siva Theja Maguluri",
title = "Exponential Tail Bounds on Queues",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "24--26",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626580",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626580",
abstract = "A popular approach to computing performance measures
of queueing systems (such as delay and queue length) is
studying the system in an asymptotic \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2023:USQ,
author = "Florin Ciucu and Sima Mehri and Amr Rizk",
title = "On Ultra-Sharp Queueing Bounds",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "27--29",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626581",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626581",
abstract = "Martingale-based techniques render sharp bounds in
several queueing scenarios, but mainly in heavy-traffic
and subject to the degree of burstiness. We present a
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2023:WOP,
author = "Nicolas Gast and Bruno Gaujal and Kimang Khun",
title = "What is an Optimal Policy in Time-Average {MDP}?",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "30--32",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626582",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626582",
abstract = "This paper discusses the notion of optimality for
time-average MDPs. We argue that while most authors
claim to use the ``average reward'' criteria, the
notion that is \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hong:2023:PGP,
author = "Yige Hong and Ziv Scully",
title = "Performance of the {Gittins Policy} in the {G/G/1} and
{G/G/$k$}, With and Without Setup Times",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "33--35",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626583",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626583",
abstract = "We consider the classic problem of preemptively
scheduling jobs of unknown size (a.k.a. service time)
in a queue to minimize mean \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xie:2023:RHT,
author = "Runhan Xie and Ziv Scully",
title = "Reducing Heavy-Traffic Response Time with Asymmetric
Dispatching",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "36--38",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626584",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626584",
abstract = "Reducing mean response time has always been a
desirable goal in queueing systems. If job sizes
(a.k.a. service times) are known to the scheduler, the
policy that \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2023:MDP,
author = "Yingdong Lu and Mark S. Squillante and Chai Wah Wu",
title = "{Markov} Decision Process Framework for Control-Based
Reinforcement Learning",
journal = j-SIGMETRICS,
volume = "51",
number = "2",
pages = "39--41",
month = sep,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3626570.3626585",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Tue Oct 3 05:43:54 MDT 2023",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3626570.3626585",
abstract = "For many years, reinforcement learning (RL) has proven
to be very successful in solving a wide variety of
learning and decision making under uncertainty (DMuU)
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alouf:2023:ASS,
author = "Sara Alouf",
title = "{ACM SIGMETRICS 2023 Student Research Competition}",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "2--2",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639832",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639832",
abstract = "Every year, the Association for Computing Machinery
(ACM) spearheads a series of Student Research
Competitions (SRCs) at ACM-sponsored or co-sponsored
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{NovaesdeOliveira:2023:LOR,
author = "Amanda Camacho {Novaes de Oliveira}",
title = "Learning the Optimal Representation Dimension for
Restricted {Boltzmann} Machines",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "3--5",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639833",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639833",
abstract = "Hyperparameters refer to a set of parameters of a
machine learning model that are fixed and not adjusted
during training. A fundamental problem in this
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Niu:2023:GLB,
author = "Zifeng Niu",
title = "Graph Learning based Performance Analysis for Queueing
Networks",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "6--7",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639834",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639834",
abstract = "Queueing networks serve as a popular performance model
in the analysis of business processes and computer
systems [4]. Solving queueing network \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2023:RTM,
author = "Isaac Grosof",
title = "The {RESET} Technique for Multiserver-Job Analysis",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "8--9",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639835",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639835",
abstract = "Multiserver queueing theory emphasizes
one-server-per-job models, such as the M/G/k. Such
models were popular for decades in the study of
computing \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hossen:2023:PCR,
author = "Md Rajib Hossen",
title = "{PEMA+}: a Comprehensive Resource Manager for
Microservices",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "10--12",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639836",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639836",
abstract = "Motivation. Microservices architecture has become more
prevalent in cloud-based applications where small,
loosely coupled service components work together
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2023:DED,
author = "Yuanyuan Li",
title = "Distributed Experimental Design Networks",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "13--15",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639837",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639837",
abstract = "As edge computing capabilities increase, model
learning deployments in a heterogeneous edge
environment have emerged. We consider an \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{He:2023:ZCE,
author = "Xiangan He",
title = "{Zephyr}: a Cost-Effective, Zero-Knowledge Light
Client for Enhanced Blockchain Interoperability",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "16--18",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639838",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639838",
abstract = "Blockchains are siloed by nature. A longtime
limitation of blockchain technology is that individual
cryptocurrencies are bound to their own chains. Users
and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouza:2023:EEW,
author = "Beatriz Pereira de Souza and Marcio Nunes de Miranda
and Luiz Maltar Castello Branco",
title = "An Energy-efficient Wireless Sensor Network Applied to
Greenhouse Cultivation",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "19--21",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639839",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639839",
abstract = "Wireless sensors have already been used for a long
time in military, health, and agricultural environments
[6]. Despite the advances in precision agriculture,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ahmed:2023:BPE,
author = "Nishat Ahmed and Amaan Rahman and Lucia Rhode",
title = "Best Practices for Exoskeleton Evaluation Using
{DeepLabCut}",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "22--24",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639840",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639840",
abstract = "Exoskeleton fit evaluation using pose estimation is
necessary to ensure exoskeletons promote productivity
in industrial settings. However, both \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huang:2023:EEB,
author = "Tianhao Huang and Xiaozhi Zhu and Mo Niu",
title = "An End-to-End Benchmarking Tool for Analyzing the
Hardware-Software Implications of Multi-modal {DNNs}",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "25--27",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639841",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639841",
abstract = "Abstract-Multi-modal deep neural networks (DNNs) have
become increasingly pervasive in many machine learning
application domains due to their superior \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ayesta:2023:FCA,
author = "Urtzi Ayesta",
title = "Foreword from {Chair of 2023 ACM SIGMETRICS Doctoral
Dissertation Award Committee}",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "28--28",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639843",
abstract = "The ACM SIGMETRICS Doctoral Dissertation Award
recognizes outstanding thesis research by doctoral
candidates in the field of performance evaluation
analysis \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2023:OSMb,
author = "Isaac Grosof",
title = "Optimal Scheduling in Multiserver Queues",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "29--32",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639844",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639844",
abstract = "Scheduling theory is a key tool for reducing latency
(i.e. response time) in queueing systems. Scheduling,
i.e. choosing the order in which to serve jobs, can
reduce \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jhunjhunwala:2023:DAS,
author = "Prakirt Raj Jhunjhunwala",
title = "Design and Analysis of Stochastic Processing and
Matching Networks",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "33--37",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639845",
abstract = "Stochastic Processing Networks (SPNs) [24] are
ubiquitous in engineering with applications in Data
Centers (e.g., packet routing), Telecommunication
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sinclair:2023:ASO,
author = "Sean R. Sinclair",
title = "Adaptivity, Structure, and Objectives in Sequential
Decision-Making",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "38--41",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639846",
abstract = "Sequential decision-making algorithms are ubiquitous
in the design and optimization of large-scale systems
due to their practical impact. The typical algorithmic
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Corbett:2023:ESP,
author = "Matthew Corbett",
title = "Enhancing Security and Privacy in Head-Mounted
Augmented Reality Systems Using Eye Gaze",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "42--45",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639848",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639848",
abstract = "Augmented Reality (AR) devices offer a rich, immersive
experience that provides the user with a blend of the
physical and the synthetic, digitally augmented
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2023:DFI,
author = "Zhongdong Liu",
title = "Data Freshness in Information-update Systems:
Modeling, Scheduling, and Tradeoffs",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "46--49",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639849",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639849",
abstract = "Brief Biography: Zhongdong Liu is a final-year Ph.D.
student in the Department of Computer Science at
Virginia Tech. He received his B.S. degree in
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ornee:2023:TTR,
author = "Tasmeen Zaman Ornee",
title = "Theory of Timely Remote Estimation and Application to
Situational Awareness",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "50--53",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639850",
abstract = "Brief Biography: Tasmeen Zaman Ornee is a PhD
candidate at the Electrical and Computer Engineering
Department of Auburn University. She joined the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:2023:DRC,
author = "Abhin Shah",
title = "Data-Rich Causal Inference",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "54--57",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639851",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639851",
abstract = "Brief Biography: Abhin Shah is a final-year Ph.D.
student in the department of Electrical Engineering and
Computer Science at MIT, where he is a recipient of
MIT's \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shisher:2023:TIN,
author = "Md Kamran Chowdhury Shisher",
title = "Timely Inference over Networks",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "58--61",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639852",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639852",
abstract = "Brief Biography: Md Kamran Chowdhury Shisher is a
final-year Ph.D. candidate in the Department of
Electrical and Computer Engineering at Auburn
University, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tripathi:2023:IFM,
author = "Vishrant Tripathi",
title = "Information Freshness for Monitoring and Control over
Wireless Networks",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "62--65",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639853",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639853",
abstract = "Brief Biography: Vishrant Tripathi obtained his PhD
from the EECS department at MIT, working with Prof.
Modiano at the Lab for Information and Decision Systems
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zuo:2023:LEN,
author = "Jinhang Zuo",
title = "Learning-enabled Networked Systems",
journal = j-SIGMETRICS,
volume = "51",
number = "3",
pages = "66--69",
month = dec,
year = "2023",
CODEN = "????",
DOI = "https://doi.org/10.1145/3639830.3639854",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Jan 8 07:22:27 MST 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3639830.3639854",
abstract = "Brief Biography: Jinhang Zuo is a joint postdoc at
UMass Amherst and Caltech. He received his Ph.D. in ECE
from CMU in 2022. His main research interests include
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Braverman:2024:FGC,
author = "Anton Braverman and Varun Gupta",
title = "Foreword from the general chairs",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "2",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649479",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649479",
abstract = "It is our pleasure to welcome you to the 41st IFIP
PERFORMANCE International Conference, organized by the
Kellogg School of Management at Northwestern
University, USA. This is the first post-COVID
PERFORMANCE conference held fully in person, after the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barford:2024:MTP,
author = "Paul Barford and Maria Vlasiou and Lei Ying",
title = "Message from the {Technical Program Committee
Chairs}",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "3",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649480",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649480",
abstract = "It is with great pleasure that we present to you this
publication, the proceedings of IFIP Performance 2023.
This issue includes extended abstracts of all regular
papers accepted at the conference and the full short
papers. This year's program boasts a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scherrer:2024:QCA,
author = "Simon Scherrer and Seyedali Tabaeiaghdaei and Adrian
Perrig",
title = "Quality Competition Among {Internet} Service
Providers",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "4--5",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649481",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649481",
abstract = "Internet service providers (ISPs) have a variety of
quality attributes that determine their attractiveness
for data transmission, ranging from quality-of-service
metrics such as jitter to security properties such as
the presence of DDoS defense systems. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2024:RMT,
author = "Isaac Grosof and Yige Hong and Mor Harchol-Balter and
Alan Scheller-Wolf",
title = "The {RESET} and {MARC} Techniques, with Application to
Multiserver-Job Analysis",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "6--7",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649482",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649482",
abstract = "Multiserver- job (MSJ) systems, where jobs need to run
concurrently across many servers, are increasingly
common in practice. The default service ordering in
many settings is First-Come First-Served (FCFS)
service. Virtually all theoretical work on MSJ
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pai:2024:FCC,
author = "Meera Pai and Nikhil Karamchandani and Jayakrishnan
Nair",
title = "Fixed confidence community mode estimation",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "8--9",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649483",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649483",
abstract = "There are several interesting applications which are
based on sequentially sampling individuals from an
underlying population. Examples include online
cardinality estimation [1-4] where the goal is to
approximate the total size of the population and
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Saurav:2024:MAI,
author = "Kumar Saurav and Rahul Vaze",
title = "Minimizing Age of Information under Arbitrary Arrival
Model with Arbitrary Packet Size",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "10--11",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649484",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649484",
abstract = "In networked systems such as internet-of-things,
cyber-physical systems, etc., information timeliness is
a critical requirement. Timely updates ensure that the
information available at the subsystems (nodes) is
accurate, and actions are well- \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hong:2024:PGP,
author = "Yige Hong and Ziv Scully",
title = "Performance of the {Gittins} Policy in the {G/G/1} and
{G/G/$k$}, With and Without Setup Times",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "12--13",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649485",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649485",
abstract = "We consider the classic problem of preemptively
scheduling jobs in a queue to minimize mean
number-in-system, or equivalently mean response time.
Even in single-server queueing models, this can be a
nontrivial problem whose answer depends on the
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yeger:2024:ATQ,
author = "Yaron Yeger and Onno Boxma and Jacques Resing and
Maria Vlasiou",
title = "{ASIP} tandem queues with consumption",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "14--15",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649486",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649486",
abstract = "The Asymmetric Inclusion Process (ASIP) tandem queue
is a model of stations in series with a gate after each
station. At a gate opening, all customers in that
station instantaneously move to the next station
unidirectionally. We enhance the ASIP model by
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jhunjhunwala:2024:HTJ,
author = "Prakirt Raj Jhunjhunwala and Siva Theja Maguluri",
title = "Heavy Traffic Joint Queue Length Distribution without
Resource Pooling",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "16--17",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649487",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649487",
abstract = "This paper studies the Heavy Traffic (HT) joint
distribution of queue lengths in an Input-queued switch
(IQ switch) operating under the MaxWeight scheduling
policy. IQ switches serve as representatives of SPNs that
do not satisfy the so-called Complete \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jhunjhunwala:2024:ETB,
author = "Prakirt Raj Jhunjhunwala and Daniela Hurtado-Lange and
Siva Theja Maguluri",
title = "Exponential Tail Bounds on Queues: a Confluence of
Non-Asymptotic Heavy Traffic and Large Deviations",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "18--19",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649488",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649488",
abstract = "In general, obtaining the exact steady-state
distribution of queue lengths is not feasible.
Therefore, we focus on establishing bounds for the tail
probabilities of queue lengths. We examine queueing
systems under Heavy Traffic (HT) conditions and
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Guo:2024:PSO,
author = "Hengquan Guo and Hongchen Cao and Jingzhu He and Xin
Liu and Yuanming Shi",
title = "{POBO}: Safe and Optimal Resource Management for Cloud
Microservices",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "20--21",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649489",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649489",
abstract = "Resource management in microservices is challenging
due to the uncertain latency-resource relationship,
dynamic environment, and strict Service-Level Agreement
(SLA) guarantees. This paper presents a Pessimistic and
Optimistic Bayesian Optimization \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Senapati:2024:OCO,
author = "Spandan Senapati and Rahul Vaze",
title = "Online Convex Optimization with Switching Cost and
Delayed Gradients",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "22--23",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649490",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649490",
abstract = "We consider the online convex optimization (OCO)
problem with quadratic and linear switching cost when
at time t only gradient information for functions
f$_T$, T {$<$} t, is available. For L-smooth and
$\mu$-strongly convex objective functions, we propose an
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Guan:2024:WNI,
author = "Xiaoding Guan and Noman Bashir and David Irwin and
Prashant Shenoy",
title = "{WattScope}: Non-intrusive Application-level Power
Disaggregation in Datacenters",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "24--25",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649491",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649491",
abstract = "WattScope is a system for non-intrusively estimating
the power consumption of individual applications using
external measurements of a server's aggregate power
usage and without requiring direct access to the
server's operating system or applications. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhao:2024:ORM,
author = "Zhisheng Zhao and Debankur Mukherjee",
title = "Optimal Rate-Matrix Pruning For Heterogeneous
Systems",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "26--27",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649492",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649492",
abstract = "We consider large-scale load balancing systems where
the processing time distribution of tasks depends on both
task and server types. We analyze the system in the
asymptotic regime where the number of task and server
types tend to infinity proportionally to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Olliaro:2024:SMJ,
author = "Diletta Olliaro and Marco Ajmone Marsan and Simonetta
Balsamo and Andrea Marin",
title = "The Saturated Multiserver Job Queuing Model with Two
Classes of Jobs: Exact and Approximate Results",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "28--29",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649493",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649493",
abstract = "We study a Multiserver Job Queuing Model (MJQM), i.e.,
a FIFO (First-In-First-Out) queue in which jobs request
varying numbers of servers. Our investigation focuses
on the saturation analysis of the MJQM with two job
classes. Jobs in the first class need \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agarwal:2024:RFP,
author = "Khushboo Agarwal and Veeraruna Kavitha",
title = "Robust fake-post detection against real-coloring
adversaries",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "30--31",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649494",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649494",
abstract = "We design warning mechanisms for detecting fake-post
on online social networks using crowd signals without
significantly affecting the real-post propagation.
Building on a recent algorithm where all users assign a
real or fake tag to any post, we now \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2024:BCD,
author = "Boyang Zhou and Isaac Howenstine and Liang Cheng and
Steffen Bondorf",
title = "Breaking Cyclic Dependencies for Network Calculus
using Service Partitioning",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "32--42",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649495",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649495",
abstract = "Network Calculus (NC) is a method for providing
certification evidence in networked systems, ensuring
proper functioning of time-critical traffic.
Traditional NC analyses focus on feedforward networks,
i.e., networks without cyclic dependencies.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferragut:2024:QAI,
author = "Andres Ferragut and Fernando Paganini",
title = "Queueing analysis of imbalance between multiple server
pools with an application to 3-phase {EV} charging",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "43--53",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649496",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649496",
abstract = "We consider systems where multiple servers operate in
parallel, with a particular feature: servers are
classified into d classes, and we wish to keep
approximate balance between the load allocated to each
class. We introduce a relevant imbalance metric,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:2024:FRT,
author = "Hai Duc Nguyen and Andrew A. Chien",
title = "A Foundation for Real-time Applications
on Function-as-a-Service",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "54--65",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649497",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649497",
abstract = "Serverless (or Function-as-a-Service) compute model
enables new applications with dynamic scaling. However,
all current Serverless systems are best-effort, and as
we prove this means they cannot guarantee hard
real-time deadlines, rendering them \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cao:2024:COP,
author = "Ying Cao and Siyuan Yu and Xiaoqi Tan and Danny H. K.
Tsang",
title = "Competitive Online Path-Aware Path Selection",
journal = j-SIGMETRICS,
volume = "51",
number = "4",
pages = "66--72",
month = mar,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3649477.3649498",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:47 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3649477.3649498",
abstract = "This paper studies an online path selection problem
and proposes online mechanisms for a network operator
to sequentially update link prices. The aim is to
incentivize online-arriving agents to join the network
and select paths in a manner that maximizes \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2024:ASI,
author = "Nicolas Gast and Benny {Van Houdt}",
title = "Approximations to Study the Impact of the Service
Discipline in Systems with Redundancy",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "1--2",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655045",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:48 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655045",
abstract = "In this paper we develop the first methods to
approximate the queue length distribution in a queueing
system with redundancy under various service
disciplines. We focus on a system with exponential job
sizes, i.i.d. copies, and a large number of servers.
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cardinaels:2024:MDS,
author = "Ellen Cardinaels and Sem Borst and Johan S. H. van
Leeuwaarden",
title = "Multi-dimensional State Space Collapse in Non-complete
Resource Pooling Scenarios",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "3--4",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655067",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:48 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655067",
abstract = "We establish an explicit multi-dimensional state space
collapse (SSC) for parallel-processing systems with
arbitrary compatibility constraints between servers and
job types. This breaks major new ground beyond the SSC
results and queue length asymptotics \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2024:STO,
author = "George Yu and Ziv Scully",
title = "Strongly Tail-Optimal Scheduling in the Light-Tailed
{M/G/1}",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "5--6",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655084",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:48 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655084",
abstract = "We study the problem of scheduling jobs in a queueing
system, specifically an M/G/1 with light-tailed job
sizes, to asymptotically optimize the response time
tail. This means scheduling to make {$\mathbf{P}[T > t]$},
the chance a job's response time exceeds t,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xie:2024:HTO,
author = "Runhan Xie and Isaac Grosof and Ziv Scully",
title = "Heavy-Traffic Optimal Size- and State-Aware
Dispatching",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "7--8",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655059",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Jun 14 06:45:48 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655059",
abstract = "We study the problem of dispatching jobs to multiple
FCFS (First-Come, First-Served) queues. We consider the
case where the dispatcher is size-aware, meaning it
learns the size (i.e. service time) of each job as it
arrives; and state-aware, meaning it \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moura:2024:DDN,
author = "Giovane C. M. Moura and Marco Davids and Caspar
Schutijser and Cristian Hesselman and John Heidemann
and Georgios Smaragdakis",
title = "Deep Dive into {NTP Pool}'s Popularity and Mapping",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "9--10",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655051",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655051",
abstract = "Time synchronization is of paramount importance on the
Internet, with the Network Time Protocol (NTP) serving
as the primary synchronization protocol. The NTP Pool,
a volunteer-driven initiative launched two decades ago,
facilitates connections between \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lyu:2024:MMM,
author = "Minzhao Lyu and Rahul Dev Tripathi and Vijay
Sivaraman",
title = "{MetaVRadar}: Measuring Metaverse Virtual Reality
Network Activity",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "11--12",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655065",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655065",
abstract = "The ''metaverse'', wherein users can immerse in
virtual worlds through their VR headsets to work,
study, play, shop, socialize, and entertain, is fast
becoming a reality. However, little is known about the
network dynamics of metaverse VR applications,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ortiz:2024:SWE,
author = "Neil Ortiz and Alvaro A. Cardenas and Avishai Wool",
title = "{SCADA} World: an Exploration of the Diversity in
Power Grid Networks",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "13--14",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655078",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655078",
abstract = "Despite a growing interest in understanding the
industrial control networks that monitor and control
our critical infrastructures (such as the power grid),
to date, SCADA networks have been analyzed in isolation
from each other. They have been treated as \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Izhikevich:2024:DLS,
author = "Liz Izhikevich and Manda Tran and Katherine Izhikevich
and Gautam Akiwate and Zakir Durumeric",
title = "Democratizing {LEO} Satellite Network Measurement",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "15--16",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655052",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655052",
abstract = "Low Earth Orbit (LEO) satellite networks are quickly
gaining traction with promises of impressively low
latency, high bandwidth, and global reach. However, the
research community knows relatively little about their
operation and performance in practice. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dozier:2024:AFN,
author = "Kahlil Dozier and Loqman Salamatian and Dan
Rubenstein",
title = "Analysis of False Negative Rates for Recycling {Bloom}
Filters (Yes, They Happen!)",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "17--18",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655044",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655044",
abstract = "Bloom Filters are a desirable data structure for
distinguishing new values in sequences of data (i.e.,
messages), due to their space efficiency, their low
false positive rates (incorrectly classifying a new
value as a repeat), and never producing false
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mizrahi:2024:IBL,
author = "Avi Mizrahi and Daniella Bar-Lev and Eitan Yaakobi and
Ori Rottenstreich",
title = "Invertible {Bloom} Lookup Tables with Listing
Guarantees",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "19--20",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655060",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655060",
abstract = "The Invertible Bloom Lookup Table (IBLT) is a
probabilistic concise data structure for set
representation that supports a listing operation as the
recovery of the elements in the represented set. Its
applications can be found in network synchronization
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Monterubbiano:2024:LAR,
author = "Andrea Monterubbiano and Jonatan Langlet and Stefan
Walzer and Gianni Antichi and Pedro Reviriego and
Salvatore Pontarelli",
title = "Lightweight Acquisition and Ranging of Flows in the
Data Plane",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "21--22",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655063",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655063",
abstract = "As networks get more complex, the ability to track
almost all the flows is becoming of paramount
importance. This is because we can then detect
transient events impacting only a subset of the
traffic. Solutions for flow monitoring exist, but it is
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pandey:2024:TRT,
author = "Santosh Pandey and Amir Yazdanbakhsh and Hang Liu",
title = "{TAO}: Re-Thinking {DL}-based Microarchitecture
Simulation",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "23--24",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655085",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655085",
abstract = "Microarchitecture simulators are indispensable tools
for microarchitecture designers to validate, estimate,
and optimize new hardware that meets specific design
requirements. While the quest for a fast, accurate and
detailed microarchitecture simulation \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bakhshalipour:2024:AAS,
author = "Mohammad Bakhshalipour and Phillip B. Gibbons",
title = "Agents of Autonomy: a Systematic Study of Robotics on
Modern Hardware",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "25--26",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655043",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655043",
abstract = "As robots increasingly permeate modern society, it is
crucial for the system and hardware research community
to bridge its long-standing gap with robotics. This
divide has persisted due to the lack of (i) a
systematic performance evaluation of robotics
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Iyer:2024:ABA,
author = "Venkatraman Iyer and Sungho Lee and Semun Lee and
Juitem Joonwoo Kim and Hyunjun Kim and Youngjae Shin",
title = "Automated Backend Allocation for Multi-Model,
On-Device {AI} Inference",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "27--28",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655046",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655046",
abstract = "On-Device Artificial Intelligence (AI) services such
as face recognition, object tracking and voice
recognition are rapidly scaling up deployments on
embedded, memory-constrained hardware devices. These
services typically delegate AI inference models for
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2024:LOC,
author = "Qingsong Liu and Zhixuan Fang",
title = "Learning the Optimal Control for Evolving Systems with
Converging Dynamics",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "29--30",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655062",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655062",
abstract = "We consider a principle or controller that can pick
actions from a fixed action set to control an evolving
system with converging dynamics. The converging
dynamics means that, if the principal holds the same
action, the system will asymptotically \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pan:2024:SRE,
author = "Jiayu Pan and Yin Sun and Ness B. Shroff",
title = "Sampling for Remote Estimation of the {Wiener} Process
over an Unreliable Channel",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "31--32",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655077",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655077",
abstract = "In this paper, we study a sampling problem where a
source takes samples from a Wiener process and
transmits them through a wireless channel to a remote
estimator. Due to channel fading, interference, and
potential collisions, the packet transmissions are
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsanikidis:2024:NOP,
author = "Christos Tsanikidis and Javad Ghaderi",
title = "Near-Optimal Packet Scheduling in Multihop Networks
with End-to-End Deadline Constraints",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "33--34",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655069",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655069",
abstract = "Scheduling packets with end-to-end deadline
constraints in multihop networks is an important
problem that has been notoriously difficult to tackle.
Recently, there has been progress on this problem in
the worst-case traffic setting, with the objective of
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2024:PCS,
author = "Yixuan Zhang and Dongyan (Lucy) Huo and Yudong Chen
and Qiaomin Xie",
title = "Prelimit Coupling and Steady-State Convergence of
Constant-stepsize Nonsmooth Contractive {SA}",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "35--36",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655076",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655076",
abstract = "Motivated by Q-learning, we study nonsmooth
contractive stochastic approximation (SA) with constant
stepsize. We focus on two important classes of
dynamics: (1) nonsmooth contractive SA with additive
noise, and (2) synchronous and asynchronous Q-learning,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2024:MLS,
author = "Huaifeng Zhang and Mohannad Alhanahnah and Fahmi
Abdulqadir Ahmed and Dyako Fatih and Philipp Leitner
and Ahmed Ali-Eldin",
title = "Machine Learning Systems are Bloated and Vulnerable",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "37--38",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655064",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655064",
abstract = "Today's software is bloated with both code and
features that are not used by most users. This bloat is
prevalent across the entire software stack, from
operating systems and applications to containers.
Containers are lightweight virtualization \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cheng:2024:TCA,
author = "Scott Cheng and Jun-Liang Lin and Murali Emani and
Siddhisanket Raskar and Sam Foreman and Zhen Xie and
Venkatram Vishwanath and Mahmut T. Kandemir",
title = "Thorough Characterization and Analysis of Large
Transformer Model Training At-Scale",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "39--40",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655087",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655087",
abstract = "Large transformer models have recently achieved great
success across various domains. With a growing number
of model parameters, a large transformer model training
today typically involves model sharding, data
parallelism, and model parallelism. Thus, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pan:2024:HPT,
author = "Yicheng Pan and Yang Zhang and Tingzhu Bi and Linlin
Han and Yu Zhang and Meng Ma and Xiangzhuang Shen and
Xinrui Jiang and Feng Wang and Xian Liu and Ping Wang",
title = "{HEAL}: Performance Troubleshooting Deep inside Data
Center Hosts",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "41--42",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655058",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655058",
abstract = "This study demonstrates the salient facts and
challenges of host failure operations in hyperscale
data centers. A host incident can involve hundreds of
distinct host-level metrics. The faulting mechanism
inside the host connects these heterogeneous \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cai:2024:KVU,
author = "Peter Cai and Martin Karsten",
title = "Kernel vs. User-Level Networking: Don't Throw Out the
Stack with the Interrupts",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "43--44",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655061",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655061",
abstract = "This paper reviews the performance characteristics of
network stack processing for communication-heavy server
applications. Recent literature often describes
kernel-bypass and user-level networking as a silver
bullet to attain substantial performance \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lechowicz:2024:OCS,
author = "Adam Lechowicz and Nicolas Christianson and Bo Sun and
Noman Bashir and Mohammad Hajiesmaili and Adam Wierman
and Prashant Shenoy",
title = "Online Conversion with Switching Costs: Robust and
Learning-Augmented Algorithms",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "45--46",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655074",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655074",
abstract = "We introduce and study online conversion with
switching costs, a family of online problems that
capture emerging problems at the intersection of energy
and sustainability. In this problem, an online player
attempts to purchase (alternatively, sell) \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lechowicz:2024:OPR,
author = "Adam Lechowicz and Nicolas Christianson and Jinhang
Zuo and Noman Bashir and Mohammad Hajiesmaili and Adam
Wierman and Prashant Shenoy",
title = "The Online Pause and Resume Problem: Optimal
Algorithms and An Application to Carbon-Aware Load
Shifting",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "47--48",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655086",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655086",
abstract = "We introduce and study the online pause and resume
problem. In this problem, a player attempts to find the
k lowest (alternatively, highest) prices in a sequence
of fixed length T, which is revealed sequentially. At
each time step, the player is \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hanafy:2024:CLC,
author = "Walid A. Hanafy and Qianlin Liang and Noman Bashir and
David Irwin and Prashant Shenoy",
title = "{CarbonScaler}: Leveraging Cloud Workload Elasticity
for Optimizing Carbon-Efficiency",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "49--50",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655048",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655048",
abstract = "Due to inherent variations in energy's carbon
intensity, temporal shifting has become a key method in
reducing the carbon footprint of batch workloads.
However, temporally shifting workloads involves
searching for periods with lower carbon intensity,
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huang:2024:OCP,
author = "Yudi Huang and Yilei Lin and Ting He",
title = "Optimized Cross-Path Attacks via Adversarial
Reconnaissance",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "51--52",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655075",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655075",
abstract = "While softwarization and virtualization technologies
make modern communication networks appear easier to
manage, they also introduce highly complex interactions
within the networks that can cause unexpected security
threats. In this work, we study a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anghel:2024:WGM,
author = "Radu Anghel and Yury Zhauniarovich and Carlos
Ga{\~n}{\'a}n",
title = "Who's Got My Back? {Measuring} the Adoption of an
{Internet}-wide {BGP RTBH} Service",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "53--54",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655090",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655090",
abstract = "Distributed Denial-of-Service (DDoS) attacks continue
to threaten the availability of Internet-based
services. While countermeasures exist to decrease the
impact of these attacks, not all operators have the
resources or knowledge to deploy them. Unwanted
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choo:2024:LSS,
author = "Euijin Choo and Mohamed Nabeel and Doowon Kim and
Ravindu {De Silva} and Ting Yu and Issa Khalil",
title = "A Large Scale Study and Classification of {VirusTotal}
Reports on Phishing and Malware {URLs}",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "55--56",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655042",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655042",
abstract = "VirusTotal (VT) is a widely used scanning service for
researchers and practitioners to label malicious
entities and predict new security threats.
Unfortunately, it is little known to the end-users how
VT URL scanners decide on the maliciousness of
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2024:OAR,
author = "Jianyi Yang and Pengfei Li and Mohammad J. Islam and
Shaolei Ren",
title = "Online Allocation with Replenishable Budgets: Worst
Case and Beyond",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "57--58",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655073",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655073",
abstract = "This paper studies online resource allocation with
replenishable budgets, where budgets can be replenished
on top of the initial budget and an agent sequentially
chooses online allocation decisions without violating
the available budget constraint at \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aslan:2024:FRA,
author = "Fatih Aslan and George Iosifidis and Jose A.
Ayala-Romero and Andres Garcia-Saavedra and Xavier
Costa-Perez",
title = "Fair Resource Allocation in Virtualized {O-RAN}
Platforms",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "59--60",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655054",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655054",
abstract = "O-RAN systems in virtualized platforms (O-Cloud) offer
performance boosts but also raise energy concerns. This
paper assesses O-Cloud's energy costs and proposes
energy-efficient policies for base station (BS) data
loads and transport block (TB) sizes. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2024:BNO,
author = "Lingdong Wang and Simran Singh and Jacob Chakareski
and Mohammad Hajiesmaili and Ramesh K. Sitaraman",
title = "{BONES}: Near-Optimal Neural-Enhanced Video
Streaming",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "61--62",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655047",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655047",
abstract = "Accessing high-quality video content can be
challenging due to insufficient and unstable network
bandwidth. Recent advances in neural enhancement have
shown promising results in improving the quality of
degraded videos through deep learning. Neural-
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jonatan:2024:SLP,
author = "Gilbert Jonatan and Haeyoon Cho and Hyojun Son and
Xiangyu Wu and Neal Livesay and Evelio Mora and
Kaustubh Shivdikar and Jos{\'e} L. Abell{\'a}n and Ajay
Joshi and David Kaeli and John Kim",
title = "Scalability Limitations of Processing-in-Memory using
Real System Evaluations",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "63--64",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655079",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655079",
abstract = "Processing-in-memory (PIM) has been widely explored in
academia and industry to accelerate numerous workloads.
By reducing the data movement and increasing
parallelism, PIM offers great performance and energy
efficiency. A large amount of cores or nodes \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bitchebe:2024:GEB,
author = "Stella Bitchebe and Yves Kone and Pierre Olivier and
Jalil Boukhobza and Y{\'e}rom-David Bromberg and Daniel
Hagimont and Alain Tchana",
title = "{GuaNary}: Efficient Buffer Overflow Detection In
Virtualized Clouds Using {Intel} {EPT}-based Sub-Page
Write Protection Support",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "65--66",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655056",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655056",
abstract = "Buffer overflow is a widespread memory safety
violation in C/C++, reported as the top vulnerability
in 2022. Secure memory allocators are generally used to
protect systems against attacks that may exploit buffer
overflows. Existing allocators mainly rely \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akbarzadeh:2024:HBH,
author = "Negar Akbarzadeh and Sina Darabi and Atiyeh
Gheibi-Fetrat and Amir Mirzaei and Mohammad Sadrosadati
and Hamid Sarbazi-Azad",
title = "A High-bandwidth High-capacity Hybrid {$3$D} Memory
for {GPUs}",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "67--68",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655057",
abstract = "GPUs execute thousands of active threads
simultaneously, requiring high memory bandwidth to
handle multiple memory requests efficiently. The memory
bandwidth in GPUs has always been increasing, but it is
still insufficient for the demands of fine-grained
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harris:2024:SDM,
author = "Keegan Harris and Anish Agarwal and Chara Podimata and
Zhiwei Steven Wu",
title = "Strategyproof Decision-Making in Panel Data Settings
and Beyond",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "69--70",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655083",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655083",
abstract = "We consider the problem of decision-making using panel
data, in which a decision-maker gets noisy, repeated
measurements of multiple units (or agents). We consider
the setup used in synthetic control methods, where
there is a pre-intervention period when \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dierks:2024:WSP,
author = "Ludwig Dierks and Makoto Yokoo",
title = "When Should Prices Stay Fixed? {On} the Chances and
Limitations of Spot Pricing in Larger Markets",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "71--72",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655089",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655089",
abstract = "Selling resources via auctions often seems
profit-optimal in theory. Yet in practice, providers
most often choose to sell homogeneous resources such as
cloud computing instances at fixed prices. While it has
been argued that this is explained by \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cheng:2024:CQB,
author = "Jin Cheng and Ningning Ding and John C. S. Lui and
Jianwei Huang",
title = "Continuous Query-based Data Trading",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "73--74",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655050",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655050",
abstract = "In the era of big data, traditional data trading
methods designed for one-time queries on static
databases fail to meet the demands of continuous
query-based trading on streaming data, often resulting
in repeated and inaccurate charges due to neglecting
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lo:2024:SVT,
author = "Lo, Chi-Jen (Roger) and Mahesh K. Marina and Nishanth
Sastry and Kai Xu and Saeed Fadaei and Yong Li",
title = "Shrinking {VOD} Traffic via {R{\'e}nyi}--Entropic
Optimal Transport",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "75--76",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655081",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655081",
abstract = "In response to the exponential surge in Video on
Demand (VOD) traffic, numerous research endeavors have
concentrated on optimizing and enhancing infrastructure
efficiency. In contrast, this paper explores whether
users' demand patterns can be shaped to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shi:2024:CLI,
author = "Ruizhe Shi and Ruizhi Cheng and Bo Han and Yue Cheng
and Songqing Chen",
title = "A Closer Look into {IPFS}: Accessibility, Content, and
Performance",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "77--78",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655040",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655040",
abstract = "The InterPlanetary File System (IPFS) has recently
gained considerable attention. While prior research has
focused on understanding its performance
characterization and application support, it remains
unclear: (1) what kind of files/content are stored in
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Roy:2024:SMB,
author = "Rohan Basu Roy and Devesh Tiwari",
title = "{StarShip}: Mitigating {I/O} Bottlenecks in Serverless
Computing for Scientific Workflows",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "79--80",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655082",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655082",
abstract = "This work highlights the significance of I/O
bottlenecks that data-intensive HPC workflows face in
serverless environments --- an issue that has been
largely overlooked by prior works. We propose StarShip,
a framework that leverages different storage \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2024:NFA,
author = "Yiguang Zhang and Reetahan Mukhopadhyay and Augustin
Chaintreau",
title = "Network Fairness Ambivalence: When Does Social Network
Capital Mitigate or Amplify Unfairness?",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "81--82",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655072",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655072",
abstract = "What are the necessary and sufficient conditions under
which multi-hop dissemination strategies decrease
rather than increase inequity within social networks?
Our analysis of various strategies suggests that this
largely depends on a limit related to the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lindstaahl:2024:CPD,
author = "Simon Lindst{\aa}hl and Alexandre Proutiere and
Andreas Johnsson",
title = "Change Point Detection with Adaptive Measurement
Schedules for Network Performance Verification",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "83--84",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655049",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655049",
abstract = "When verifying that a communications network fulfills
its specified performance, it is critical to note
sudden shifts in network behavior as quickly as
possible. Change point detection methods can be useful
in this endeavor, but classical methods rely on
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2024:NND,
author = "Xi Jiang and Shinan Liu and Aaron Gember-Jacobson and
Arjun Nitin Bhagoji and Paul Schmitt and Francesco
Bronzino and Nick Feamster",
title = "{NetDiffusion}: Network Data Augmentation Through
Protocol-Constrained Traffic Generation",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "85--86",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655071",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655071",
abstract = "Datasets of labeled network traces are essential for a
multitude of machine learning (ML) tasks in networking,
yet their availability is hindered by privacy and
maintenance concerns, such as data staleness. To
overcome this limitation, synthetic network \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huang:2024:MMM,
author = "Jintao Huang and Ningyu He and Kai Ma and Jiang Xiao
and Haoyu Wang",
title = "Miracle or Mirage? {A} Measurement Study of {NFT} Rug
Pulls",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "87--88",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655066",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655066",
abstract = "NFT rug pull is one of the most prominent type of NFT
scam, whose definition is that the developers of an NFT
project abandon it and run away with investors' funds.
Although they have drawn attention from our community,
to the best of our knowledge, the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2024:TUC,
author = "Kai Li and Shixuan Guan and Darren Lee",
title = "Towards Understanding and Characterizing the Arbitrage
Bot Scam In the Wild",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "89--90",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655088",
abstract = "This paper presents the first comprehensive analysis
of an emerging cryptocurrency scam named ``arbitrage
bot'' disseminated on online social networks. The scam
revolves around Decentralized Exchanges (DEX) arbitrage
and aims to lure victims into executing \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chu:2024:FLQ,
author = "Tianyue Chu and Nikolaos Laoutaris",
title = "{FedQV}: Leveraging Quadratic Voting in Federated
Learning",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "91--92",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655055",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655055",
abstract = "Federated Learning (FL) permits different parties to
collaboratively train a global model without disclosing
their respective local labels. A crucial step of FL,
that of aggregating local models to produce the global
one, shares many similarities with \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hong:2024:NOS,
author = "Yige Hong and Qiaomin Xie and Weina Wang",
title = "Near-Optimal Stochastic Bin-Packing in Large Service
Systems with Time-Varying Item Sizes",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "93--94",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655070",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655070",
abstract = "In modern computing systems, jobs' resource
requirements often vary over time. Accounting for this
temporal variability during job scheduling is essential
for meeting performance goals. However, theoretical
understanding on how to schedule jobs with time-
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rutten:2024:DSS,
author = "Daan Rutten and Martin Zubeldia and Debankur
Mukherjee",
title = "Distributed Speed Scaling in Large-Scale Service
Systems",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "95--96",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655053",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655053",
abstract = "We consider a large-scale parallel-server loss system
with an unknown arrival rate, where each server is able
to adjust its processing speed. The objective is to
minimize the system cost, which consists of a power
cost to maintain the servers' processing \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goldsztajn:2024:SSS,
author = "Diego Goldsztajn and Sem C. Borst and Johan S. H. van
Leeuwaarden",
title = "Server Saturation in Skewed Networks",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "97--98",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655080",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655080",
abstract = "We use bipartite graphs to model compatibility
constraints that arise between tasks and servers in
data centers, cloud computing systems and content
delivery networks. We prove that servers with skewed
graph neighborhoods saturate with tasks in a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramanathan:2024:XIC,
author = "Alagappan Ramanathan and Rishika Sankaran and
Sangeetha Abdu Jyothi",
title = "{Xaminer}: an {Internet} Cross-Layer Resilience
Analysis Tool",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "99--100",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655091",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655091",
abstract = "A resilient Internet infrastructure is critical in our
highly interconnected society. However, the Internet
faces several vulnerabilities, ranging from natural
disasters to human activities, that can impact the
physical layer and, in turn, the higher \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramanathan:2024:NFC,
author = "Alagappan Ramanathan and Sangeetha Abdu Jyothi",
title = "{Nautilus}: a Framework for Cross-Layer Cartography of
Submarine Cables and {IP} Links",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "101--102",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655068",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655068",
abstract = "Submarine cables constitute the backbone of the
Internet. However, these critical infrastructure
components are vulnerable to several natural and
man-made threats, and during failures, are difficult to
repair in remote oceans. In spite of their crucial
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carisimo:2024:HEV,
author = "Esteban Carisimo and Caleb J. Wang and Mia Weaver and
Fabi{\'a}n Bustamante and Paul Barford",
title = "A Hop Away from Everywhere: a View of the
Intercontinental Long-haul Infrastructure",
journal = j-SIGMETRICS,
volume = "52",
number = "1",
pages = "103--104",
month = jun,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3673660.3655041",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:43:39 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3673660.3655041",
abstract = "We present a longitudinal study of intercontinental
long-haul links (LHL) --- links with latencies
significantly higher than that of all other links in a
traceroute path. Our study is motivated by the
recognition of these LHLs as a network-layer \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2024:SIW,
author = "Mark S. Squillante",
title = "Special Issue on The Workshop on {MAthematical
performance Modeling and Analysis (MAMA 2024)}",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "2--2",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695413",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695413",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jazi:2024:OCG,
author = "Hossein Nekouyan Jazi and Faraz Zargari",
title = "Online Conversion with Group Fairness Constraints",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "3--5",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695414",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695414",
abstract = "In this paper, we initiate the study of an online
conversion problem that incorporates group fairness
guarantees. This problem aims to distribute a resource
with fixed capacity to a sequence of buyers based on
their offered prices. Each buyer belongs to \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Christianson:2024:RSO,
author = "Nicolas Christianson and Bo Sun and Steven Low and
Adam Wierman",
title = "Risk-Sensitive Online Algorithms",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "6--8",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695415",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695415",
abstract = "We study the design of risk-sensitive online
algorithms, in which risk measures are used in the
competitive analysis of randomized online algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferragut:2024:TBP,
author = "Andres Ferragut and Matias Carrasco and Fernando
Paganini",
title = "Timer-based pre-fetching for increasing hazard rates",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "9--11",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695416",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695416",
abstract = "Caching plays a crucial role in today's networks:
keeping popular content close to users reduces latency.
Timer-based caching policies (TTL) have long been used
to deal with bursts of requests, and their properties
are well understood. However, in some \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2024:HPQ,
author = "Zhouzi Li and Mor Harchol-Balter and Alan
Scheller-Wolf",
title = "Hybrid Priority Queue and its Applications",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "12--14",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695417",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695417",
abstract = "Priority queues are well understood in queueing
theory. However, they are somewhat restrictive in that
the low-priority customers suffer far greater waiting
times than the high-priority customers. In this short
paper, we introduce a novel generalization \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harlev:2024:GPO,
author = "Amit Harlev and George Yu and Ziv Scully",
title = "A Gittins Policy for Optimizing Tail Latency",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "15--17",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695418",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695418",
abstract = "Service level objectives (SLOs) for queueing systems
typically relate to the tail of the system's response
time distribution T. The tail is the function mapping a
time t to the probability P[T {$>$} t]. SLOs typically
ask that high percentiles of T are not \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramakrishna:2024:TAP,
author = "Shefali Ramakrishna and Ziv Scully",
title = "Transform Analysis of Preemption Overhead in the
{M/G/1}",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "18--20",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695419",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695419",
abstract = "Preemptive scheduling policies, which allow pausing
jobs mid-service, are ubiquitous because they allow
important jobs to receive service ahead of unimportant
jobs that would otherwise delay their completion. The
canonical example is Shortest Remaining \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Charlet:2024:TON,
author = "Nils Charlet and Benny Van Houdt",
title = "Tail Optimality of the {Nudge-$M$} Scheduling
Algorithm",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "21--23",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695420",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695420",
abstract = "Recently it was shown that the response time of
First-Come-First-Served (FCFS) scheduling can be
stochastically and asymptotically improved upon by the
Nudge scheduling algorithm in case of light-tailed job
size distributions. Such improvements are \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Grosof:2024:BMG,
author = "Isaac Grosof and Ziyuan Wang",
title = "Bounds on {M/G/$k$} Scheduling Under Moderate Load
Improving on {SRPT-$k$} and Tightening Lower Bounds",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "24--26",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695421",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695421",
abstract = "A well-designed scheduling policy can significantly
improve the performance of a queueing system, without
requiring any additional resources. While scheduling is
well-understood in the single-server setting, much less
is known in the multiserver setting. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2024:SPM,
author = "Zhongrui Chen and Isaac Grosof and Benjamin Berg",
title = "Simple Policies for Multiresource Job Scheduling",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "27--29",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695422",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695422",
abstract = "Data center workloads are composed of multiresource
jobs requiring a variety of computational resources
including CPU cores, memory, disk space, and hardware
accelerators. Modern servers can run multiple jobs in
parallel, but a set of jobs can only run \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2024:OSC,
author = "Larkin Liu and Shiqi Liu and Matej Jusup",
title = "Optimizing Stochastic Control through State Transition
Separability and Resource-Utility Exchange",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "30--32",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695423",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695423",
abstract = "In the realm of stochastic control, particularly in
the fields of economics and engineering, Markov
Decision Processes (MDP's) are employed to represent
various processes ranging from asset management to
transportation logistics. Upon closer examination
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pan:2024:NSB,
author = "Weici Pan and Zhenhua Liu",
title = "Non-stationary Bandits with Heavy Tail",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "33--35",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695424",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695424",
abstract = "In this study, we investigate the performance of
multi-armed bandit algorithms in environments
characterized by heavy-tailed and non-stationary reward
distributions, a setting that deviates from the
conventional risk-neutral and sub-Gaussian
assumptions. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sankagiri:2024:PRF,
author = "Suryanarayana Sankagiri and Bruce Hajek",
title = "Pricing for Routing and Flow-Control in Payment
Channel Networks",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "36--38",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695425",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695425",
abstract = "Blockchains are decentralized digital transaction
systems. Most blockchains today suffer from poor
transaction throughput, resulting in exorbitant
transaction fees and hindering widespread adoption.
Layer-two blockchain mechanisms are tools that allow
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dai:2024:SSC,
author = "J. G. Dai and Jin Guang and Yaosheng Xu",
title = "Steady-State Convergence of the Continuous-Time {JSQ}
System with General Distributions in Heavy Traffic",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "39--41",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695426",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695426",
abstract = "This paper studies the continuous-time
join-the-shortest-queue (JSQ) system with general
interarrival and service distributions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kalantzis:2024:QAE,
author = "Vasileios Kalantzis and Mark S. Squillante and
Shashanka Ubaru",
title = "On Quantum Algorithms for Efficient Solutions of
General Classes of Structured {Markov} Processes",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "42--44",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695427",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695427",
abstract = "Multidimensional Markov processes arise in many
aspects of the mathematical performance analysis,
modeling and optimization of computer systems and
networks. Within this context, general classes of
structured Markov processes are of particular
importance \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Persone:2024:RTI,
author = "Vittoria {de Nitto Persone} and Y. C. Tay",
title = "Report on the {Third International Workshop on
Teaching Performance Analysis of Computer Systems
2024}",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "45--48",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695429",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695429",
abstract = "Teaching is one of the most essential activities of
academics, and leading knowledge and critical thinking
is crucial for a healthy and productive society.
However, the context is complex. The last two decades
were characterised by an economic crisis \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fiems:2024:TPA,
author = "Dieter Fiems",
title = "Teaching performance analysis: essential skills
and learning outcomes",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "49--52",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695430",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695430",
abstract = "In the age of machine learning, traditional
performance analysis courses face challenges such as
declining student interest and increasing competition
from courses within the respective study programmes. At
the same time, courses must accommodate \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meo:2024:WST,
author = "Michela Meo",
title = "Why Should {I} Teach Performance Evaluation to
Students in Networking?",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "53--57",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695431",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695431",
abstract = "In this contribution, we share our view on teaching
methodological topics in STEM disciplines and we report
our experience on teaching performance evaluation to
students in a M.Sc. focused on ICT for Smart Societies.
This program aims to equip students \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abad:2024:HCW,
author = "Cristina L. Abad",
title = "How can we Teach Workload Modeling in {CS} Systems
Classes?",
journal = j-SIGMETRICS,
volume = "52",
number = "2",
pages = "58--62",
month = sep,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3695411.3695432",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Mon Sep 9 16:38:23 MDT 2024",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3695411.3695432",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ayesta:2024:FCA,
author = "Urtzi Ayesta",
title = "Foreword from {Chair of 2024 ACM SIGMETRICS Doctoral
Dissertation Award Committee}",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "2",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712172",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712172",
abstract = "The ACM SIGMETRICS Doctoral Dissertation Award
recognizes outstanding thesis research by doctoral
candidates in the field of performance evaluation
analysis of computer systems. Nominations for the 2024
award were sought from all faculty with graduating
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varma:2024:SMN,
author = "Sushil Mahavir Varma",
title = "Stochastic Matching Networks: Theory and Applications
to Matching Platforms",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "3--6",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712173",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712173",
abstract = "The past decade has witnessed an accelerated growth of
online marketplaces and the incorporation of electric
vehicles (EVs) in the fleet of transportation systems.
Online marketplaces are online platforms that
facilitate transactions between buyers and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goldsztajn:2024:FLO,
author = "Diego Goldsztajn",
title = "Fluid Limits and Optimal Task Assignment Policies for
Locally Pooled Service Systems",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "7--10",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712174",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712174",
abstract = "Task assignment policies play a central role in many
online applications, where service requests or tasks
arrive over time and are distributed across parallel
servers in a data center or cloud computing platform.
The way in which the tasks are \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mishra:2024:UMI,
author = "Ayush Mishra",
title = "Understanding the Modern {Internet}'s Heterogeneous
Congestion Control Landscape",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "11--14",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712175",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712175",
abstract = "Research in Internet congestion control has
experienced a renaissance in recent years, driven by
two key developments. In 2016, Google introduced and
deployed BBR, a congestion control algorithm that marks
a significant departure from traditional loss-based
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akem:2024:UPA,
author = "Aristide Tanyi-Jong Akem",
title = "User-Plane Algorithms for Stateless and Stateful
Inference in Programmable Networks",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "15--18",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712177",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712177",
abstract = "In the last decade, the complexity of networks has
increased significantly to accommodate the rise of
innovative applications. This growing complexity has
rendered traditional human-in-the-loop network
management approaches inadequate, necessitating
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Olliaro:2024:MTM,
author = "Diletta Olliaro",
title = "Models for Throughput Maximisation in Distributed
Systems",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "19--22",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712178",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712178",
abstract = "In today's rapidly advancing computing and
telecommunications landscape, analysing the performance
of distributed systems is more critical than ever. As
systems grow in complexity, the demand for robust
analytical tools to evaluate efficiency and \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2024:LOR,
author = "I-Cheng Lin",
title = "Learning and Optimization over Robust Networked
Systems",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "23--26",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712179",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712179",
abstract = "Research Summary: 1 Introduction Networked systems are
ubiquitous in our daily lives, playing a critical role
across a wide range of scientific fields, including
communication, machine learning, optimization, control,
biology, economics, and social \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Malakhov:2024:ATC,
author = "Ivan Malakhov",
title = "Analysis of the Transaction Confirmation Process and
Fairness in Proof-of-Work Blockchains",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "27--30",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712180",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/bitcoin.bib;
https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712180",
abstract = "In recent years, blockchain technology has emerged as
a pivotal tool for implementing distributed ledgers.
This thesis provides an in-depth exploration of
blockchains that rely on one of the most widely adopted
consensus mechanisms: Proof-of-Work (PoW). \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2024:ECA,
author = "Jingdi Chen",
title = "Exploration, Collaboration, and Applications in
Multi-Agent Reinforcement Learning",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "31--34",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712181",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712181",
abstract = "Research Summary: In recent years, the field of
human-centric decision-making has emerged as a critical
area of research, driven by its potential to
fundamentally reshape how decisions are made across a
variety of complex systems. Human-centric
decision- \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chamberlain:2024:GTF,
author = "Jonathan Chamberlain",
title = "Game Theoretic Frameworks for Spectrum Coexistence in
Advanced Wireless Networks",
journal = j-SIGMETRICS,
volume = "52",
number = "3",
pages = "35--38",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712182",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712182",
abstract = "The research can be summarized into two broad areas,
with overlap due to proliferation of edge compute
clusters in 5G networks: (a) flexible resource
allocations in cloud environments and (b) interactions
between user agents in wireless spectrum \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2024:SRO,
author = "Xutong Liu",
title = "Scalable and Robust Online Learning for {AI}-powered
Networked Systems",
journal = "send feedback icon",
volume = "52",
number = "3",
pages = "39--42",
month = dec,
year = "2024",
CODEN = "????",
DOI = "https://doi.org/10.1145/3712170.3712183",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:40:26 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3712170.3712183",
abstract = "In today's world of pervasive connectivity and
intelligent technologies, modern networked
systems, ranging from sprawling data centers to
large-scale Internet of Things (IoT) systems, have grown
by leaps and bounds, unlocking numerous transformative
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2025:ASS,
author = "Nicolas Gast and Lishan Yang",
title = "{ACM SIGMETRICS 2024 Student Research Competition}",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "2",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725538",
abstract = "Every year, the Association for Computing Machinery
(ACM) spearheads a series of Student Research
Competitions (SRCs) at ACM-sponsored or co-sponsored
conferences. These SRCs provide graduate (Master's or
PhD program) and undergraduate students an \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aimuyo:2025:APO,
author = "Osayamen J. Aimuyo",
title = "{Aristos}: Pipelining One-sided Communication in
Distributed Mixture of Experts",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "3--5",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725539",
abstract = "We propose Aristos, a communication-optimal,
distributed algorithm that uses asynchronous
communication interleaved with computation to
specifically tackle the communication overhead of
Distributed Mixture-of-Experts (DMoE) transformer
models. DMoE, as \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Guida:2025:TPP,
author = "Ciro Guida",
title = "Toward Privacy-Preserving Training of Generative {AI}
Models for Network Traffic Classification",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "6--8",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725540",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725540",
abstract = "Synthetic traffic traces are useful for training
traffic classifiers in privacy-constrained
environments. Generative Artificial Intelligence (GAI)
models are blossoming as a solution to avoid the
sharing of real data and the lack of datasets.
Nevertheless, \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kuroda:2025:DBH,
author = "Daichi Kuroda",
title = "Differentiating between Hierarchical and Flat
Communities",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "9--10",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725541",
abstract = "As data proliferate in the form of pairwise
interactions or networks (from social media exchanges
and physical infrastructures, like railways and the
internet, to biological systems), extracting meaningful
insights remains a significant challenge. Community
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rodio:2025:MFV,
author = "Angelo Rodio",
title = "The Many Facets of Variance Reduction in Federated
Learning",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "11--12",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725542",
abstract = "Federated Learning (FL) enables clients (mobile or IoT
devices) to train a shared machine learning model
coordinated by a central server while keeping their
data local, addressing communication and privacy
concerns. In the FedAvg algorithm [2], clients
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramakrishna:2025:TAP,
author = "Shefali Ramakrishna",
title = "Transform Analysis of Preemption Overhead in the
{M/G/1}",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "13--14",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725543",
abstract = "Preemptive scheduling policies are ubiquitous in
queueing theory [3], but analysis of such policies has
not touched one important aspect: preemption overhead,
the extra work to pause and resume a preempted job.
Such an analysis is difficult because \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xiang:2025:TCA,
author = "Ming Xiang",
title = "Taming Client Availability in Federated Learning in
the Presence of Arbitrary and Unknown Dynamics",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "15--16",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725544",
abstract = "Federated learning (FL) is a prominent distributed
learning framework that allows clients to train machine
learning models orchestrated by a parameter server (PS)
[3]. Unfortunately, its practical implementation is
fundamentally hindered by heterogeneous \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2025:TCM,
author = "Yiyang Wang",
title = "Time-Continuous Modeling of {Zipfian} Workload
Locality",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "17--18",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725545",
abstract = "Traditional workload analysis uses discrete times
measured by data accesses, including the subarea of
workloads with stochastic and independent accesses. A
precise analysis in this flavor is the classic
independent reference model (IRM) (King, 1971)
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hauseux:2025:HCW,
author = "Louis Hauseux",
title = "How can we theoretically measure the performance of
density-based clustering algorithms?",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "19--20",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725546",
abstract = "Many of clustering algorithms for a point cloud X$_n$
\subset R$^d$ in the Euclidean space are based on
density estimates [1]. In fact, the density function f
of point generation contains the relevant information.
It is quite natural to try to extract what Hartigan
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{ElMimouni:2025:WIB,
author = "Ibtihal {El Mimouni}",
title = "{Whittle} Index-Based {$Q$}-Learning for Contextual
Restless Bandits: a Case Study in Email Marketing",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "21--22",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725547",
abstract = "Email marketing has become an essential tool for
businesses to reach out to potential customers [16].
However, the use of mass marketing raises ethical and
environmental issues. In fact, this practice tarnishes
domain reputation with potential spamming \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Han:2025:TQN,
author = "Jason Han and Tirthak Patel",
title = "Turning Quantum Noise on its Head: Using the Noise for
Diffusion Models to Generate Images",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "23--24",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725548",
abstract = "In this work, we propose positively using noise from
quantum computers, which is currently viewed as a
hindrance for performing useful computation, instead of
simulated noise to train generative image diffusion
models, which have two primary advantages: \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yao:2025:MIP,
author = "Yuncheng Yao",
title = "{MLFD}: The Implementation and Performance Evaluation
of an {LSTM}-based, {SmartNIC}-Offloadable Failure
Detector",
journal = j-SIGMETRICS,
volume = "52",
number = "4",
pages = "25--27",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725549",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725549",
abstract = "This paper explores the feasibility of using machine
learning algorithms for failure detection services. Our
implementation and profiling results show that a
DPDK-based failure detector (FD) using a long short-term
memory neural network performs well in terms of \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anselmi:2025:DSM,
author = "Jonatha Anselmi and Josu Doncel",
title = "Dispatching and scheduling multi-server jobs for
throughput optimality",
journal = "send feedback icon",
volume = "52",
number = "4",
pages = "28--32",
month = mar,
year = "2025",
CODEN = "????",
DOI = "https://doi.org/10.1145/3725536.3725551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Mar 19 07:44:11 MDT 2025",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/3725536.3725551",
abstract = "We consider the problem of dispatching and scheduling
an infinite stream of multiple classes of jobs to a set
of single-server parallel queues. Each job requires the
simultaneous utilization of multiple servers. Our
objective is to identify a dispatching \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}